code: string
repo_name: string
path: string
language: string
license: string
size: int64
from collections import namedtuple

import sqlparse

from django.db import DatabaseError
from django.db.backends.base.introspection import BaseDatabaseIntrospection
from django.db.backends.base.introspection import FieldInfo as BaseFieldInfo
from django.db.backends.base.introspection import TableInfo
from django.db.models import Index
from django.utils.regex_helper import _lazy_re_compile

FieldInfo = namedtuple(
    "FieldInfo", BaseFieldInfo._fields + ("pk", "has_json_constraint")
)

field_size_re = _lazy_re_compile(r"^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$")


def get_field_size(name):
    """Extract the size number from a "varchar(11)" type name."""
    m = field_size_re.search(name)
    return int(m[1]) if m else None


# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        "bool": "BooleanField",
        "boolean": "BooleanField",
        "smallint": "SmallIntegerField",
        "smallint unsigned": "PositiveSmallIntegerField",
        "smallinteger": "SmallIntegerField",
        "int": "IntegerField",
        "integer": "IntegerField",
        "bigint": "BigIntegerField",
        "integer unsigned": "PositiveIntegerField",
        "bigint unsigned": "PositiveBigIntegerField",
        "decimal": "DecimalField",
        "real": "FloatField",
        "text": "TextField",
        "char": "CharField",
        "varchar": "CharField",
        "blob": "BinaryField",
        "date": "DateField",
        "datetime": "DateTimeField",
        "time": "TimeField",
    }

    def __getitem__(self, key):
        key = key.lower().split("(", 1)[0].strip()
        return self.base_data_types_reverse[key]


class DatabaseIntrospection(BaseDatabaseIntrospection):
    data_types_reverse = FlexibleFieldLookupDict()

    def get_field_type(self, data_type, description):
        field_type = super().get_field_type(data_type, description)
        if description.pk and field_type in {
            "BigIntegerField",
            "IntegerField",
            "SmallIntegerField",
        }:
            # No support for BigAutoField or SmallAutoField as SQLite treats
            # all integer primary keys as signed 64-bit integers.
            return "AutoField"
        if description.has_json_constraint:
            return "JSONField"
        return field_type

    def get_table_list(self, cursor):
        """Return a list of table and view names in the current database."""
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute(
            """
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name"""
        )
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """
        Return a description of the table with the DB-API cursor.description
        interface.
        """
        cursor.execute(
            "PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
        )
        table_info = cursor.fetchall()
        if not table_info:
            raise DatabaseError(f"Table {table_name} does not exist (empty pragma).")
        collations = self._get_column_collations(cursor, table_name)
        json_columns = set()
        if self.connection.features.can_introspect_json_field:
            for line in table_info:
                column = line[1]
                json_constraint_sql = '%%json_valid("%s")%%' % column
                has_json_constraint = cursor.execute(
                    """
                    SELECT sql
                    FROM sqlite_master
                    WHERE
                        type = 'table' AND
                        name = %s AND
                        sql LIKE %s
                    """,
                    [table_name, json_constraint_sql],
                ).fetchone()
                if has_json_constraint:
                    json_columns.add(column)
        return [
            FieldInfo(
                name,
                data_type,
                get_field_size(data_type),
                None,
                None,
                None,
                not notnull,
                default,
                collations.get(name),
                pk == 1,
                name in json_columns,
            )
            for cid, name, data_type, notnull, default, pk in table_info
        ]

    def get_sequences(self, cursor, table_name, table_fields=()):
        pk_col = self.get_primary_key_column(cursor, table_name)
        return [{"table": table_name, "column": pk_col}]

    def get_relations(self, cursor, table_name):
        """
        Return a dictionary of {column_name: (ref_column_name, ref_table_name)}
        representing all foreign keys in the given table.
        """
        cursor.execute(
            "PRAGMA foreign_key_list(%s)"
            % self.connection.ops.quote_name(table_name)
        )
        return {
            column_name: (ref_column_name, ref_table_name)
            for (
                _,
                _,
                ref_table_name,
                column_name,
                ref_column_name,
                *_,
            ) in cursor.fetchall()
        }

    def get_primary_key_columns(self, cursor, table_name):
        cursor.execute(
            "PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
        )
        return [name for _, name, *_, pk in cursor.fetchall() if pk]

    def _parse_column_or_constraint_definition(self, tokens, columns):
        token = None
        is_constraint_definition = None
        field_name = None
        constraint_name = None
        unique = False
        unique_columns = []
        check = False
        check_columns = []
        braces_deep = 0
        for token in tokens:
            if token.match(sqlparse.tokens.Punctuation, "("):
                braces_deep += 1
            elif token.match(sqlparse.tokens.Punctuation, ")"):
                braces_deep -= 1
                if braces_deep < 0:
                    # End of columns and constraints for table definition.
                    break
            elif braces_deep == 0 and token.match(sqlparse.tokens.Punctuation, ","):
                # End of current column or constraint definition.
                break
            # Detect column or constraint definition by first token.
            if is_constraint_definition is None:
                is_constraint_definition = token.match(
                    sqlparse.tokens.Keyword, "CONSTRAINT"
                )
                if is_constraint_definition:
                    continue
            if is_constraint_definition:
                # Detect constraint name by second token.
                if constraint_name is None:
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        constraint_name = token.value
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        constraint_name = token.value[1:-1]
                # Start constraint columns parsing after UNIQUE keyword.
                if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
                    unique = True
                    unique_braces_deep = braces_deep
                elif unique:
                    if unique_braces_deep == braces_deep:
                        if unique_columns:
                            # Stop constraint parsing.
                            unique = False
                        continue
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        unique_columns.append(token.value)
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        unique_columns.append(token.value[1:-1])
            else:
                # Detect field name by first token.
                if field_name is None:
                    if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                        field_name = token.value
                    elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                        field_name = token.value[1:-1]
                if token.match(sqlparse.tokens.Keyword, "UNIQUE"):
                    unique_columns = [field_name]
            # Start constraint columns parsing after CHECK keyword.
            if token.match(sqlparse.tokens.Keyword, "CHECK"):
                check = True
                check_braces_deep = braces_deep
            elif check:
                if check_braces_deep == braces_deep:
                    if check_columns:
                        # Stop constraint parsing.
                        check = False
                    continue
                if token.ttype in (sqlparse.tokens.Name, sqlparse.tokens.Keyword):
                    if token.value in columns:
                        check_columns.append(token.value)
                elif token.ttype == sqlparse.tokens.Literal.String.Symbol:
                    if token.value[1:-1] in columns:
                        check_columns.append(token.value[1:-1])
        unique_constraint = (
            {
                "unique": True,
                "columns": unique_columns,
                "primary_key": False,
                "foreign_key": None,
                "check": False,
                "index": False,
            }
            if unique_columns
            else None
        )
        check_constraint = (
            {
                "check": True,
                "columns": check_columns,
                "primary_key": False,
                "unique": False,
                "foreign_key": None,
                "index": False,
            }
            if check_columns
            else None
        )
        return constraint_name, unique_constraint, check_constraint, token

    def _parse_table_constraints(self, sql, columns):
        # Check constraint parsing is based on the SQLite syntax diagram.
        # https://www.sqlite.org/syntaxdiagrams.html#table-constraint
        statement = sqlparse.parse(sql)[0]
        constraints = {}
        unnamed_constraints_index = 0
        tokens = (token for token in statement.flatten() if not token.is_whitespace)
        # Go to columns and constraint definition
        for token in tokens:
            if token.match(sqlparse.tokens.Punctuation, "("):
                break
        # Parse columns and constraint definition
        while True:
            (
                constraint_name,
                unique,
                check,
                end_token,
            ) = self._parse_column_or_constraint_definition(tokens, columns)
            if unique:
                if constraint_name:
                    constraints[constraint_name] = unique
                else:
                    unnamed_constraints_index += 1
                    constraints[
                        "__unnamed_constraint_%s__" % unnamed_constraints_index
                    ] = unique
            if check:
                if constraint_name:
                    constraints[constraint_name] = check
                else:
                    unnamed_constraints_index += 1
                    constraints[
                        "__unnamed_constraint_%s__" % unnamed_constraints_index
                    ] = check
            if end_token.match(sqlparse.tokens.Punctuation, ")"):
                break
        return constraints

    def get_constraints(self, cursor, table_name):
        """
        Retrieve any constraints or keys (unique, pk, fk, check, index)
        across one or more columns.
        """
        constraints = {}
        # Find inline check constraints.
        try:
            table_schema = cursor.execute(
                "SELECT sql FROM sqlite_master WHERE type='table' and name=%s"
                % (self.connection.ops.quote_name(table_name),)
            ).fetchone()[0]
        except TypeError:
            # table_name is a view.
            pass
        else:
            columns = {
                info.name for info in self.get_table_description(cursor, table_name)
            }
            constraints.update(self._parse_table_constraints(table_schema, columns))

        # Get the index info
        cursor.execute(
            "PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name)
        )
        for row in cursor.fetchall():
            # SQLite 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard the last 2 columns if present.
            number, index, unique = row[:3]
            cursor.execute(
                "SELECT sql FROM sqlite_master "
                "WHERE type='index' AND name=%s" % self.connection.ops.quote_name(index)
            )
            # There's at most one row.
            (sql,) = cursor.fetchone() or (None,)
            # Inline constraints are already detected in
            # _parse_table_constraints(). The reasons to avoid fetching inline
            # constraints from `PRAGMA index_list` are:
            # - Inline constraints can have a different name and information
            #   than what `PRAGMA index_list` gives.
            # - Not all inline constraints may appear in `PRAGMA index_list`.
            if not sql:
                # An inline constraint
                continue
            # Get the index info for that index
            cursor.execute(
                "PRAGMA index_info(%s)" % self.connection.ops.quote_name(index)
            )
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": None,
                        "check": False,
                        "index": True,
                    }
                constraints[index]["columns"].append(column)
            # Add type and column orders for indexes
            if constraints[index]["index"]:
                # SQLite doesn't support any index type other than b-tree
                constraints[index]["type"] = Index.suffix
                orders = self._get_index_columns_orders(sql)
                if orders is not None:
                    constraints[index]["orders"] = orders
        # Get the PK
        pk_columns = self.get_primary_key_columns(cursor, table_name)
        if pk_columns:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": pk_columns,
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": None,
                "check": False,
                "index": False,
            }
        relations = enumerate(self.get_relations(cursor, table_name).items())
        constraints.update(
            {
                f"fk_{index}": {
                    "columns": [column_name],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": (ref_table_name, ref_column_name),
                    "check": False,
                    "index": False,
                }
                for index, (column_name, (ref_column_name, ref_table_name)) in relations
            }
        )
        return constraints

    def _get_index_columns_orders(self, sql):
        tokens = sqlparse.parse(sql)[0]
        for token in tokens:
            if isinstance(token, sqlparse.sql.Parenthesis):
                columns = str(token).strip("()").split(", ")
                return ["DESC" if info.endswith("DESC") else "ASC" for info in columns]
        return None

    def _get_column_collations(self, cursor, table_name):
        row = cursor.execute(
            """
            SELECT sql
            FROM sqlite_master
            WHERE type = 'table' AND name = %s
            """,
            [table_name],
        ).fetchone()
        if not row:
            return {}

        sql = row[0]
        columns = str(sqlparse.parse(sql)[0][-1]).strip("()").split(", ")
        collations = {}
        for column in columns:
            tokens = column[1:].split()
            column_name = tokens[0].strip('"')
            for index, token in enumerate(tokens):
                if token == "COLLATE":
                    collation = tokens[index + 1]
                    break
            else:
                collation = None
            collations[column_name] = collation
        return collations
repo_name: castiel248/Convert
path: Lib/site-packages/django/db/backends/sqlite3/introspection.py
language: Python
license: mit
size: 17357
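The introspection module in this record normalizes SQLite's free-form declared column types before mapping them to Django field names. A minimal standalone sketch of that two-step lookup; BASE_TYPES is a hypothetical excerpt of base_data_types_reverse, and django_field_for is an illustrative helper, not part of the module:

import re

# Hypothetical excerpt of base_data_types_reverse from the record above.
BASE_TYPES = {"varchar": "CharField", "integer": "IntegerField"}
field_size_re = re.compile(r"^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$")

def django_field_for(decl_type):
    # Normalize "VARCHAR(30)" to the bare "varchar" key, as __getitem__ does,
    # and pull the size out separately, as get_field_size does.
    key = decl_type.lower().split("(", 1)[0].strip()
    m = field_size_re.search(decl_type.lower())
    return BASE_TYPES[key], int(m[1]) if m else None

print(django_field_for("VARCHAR(30)"))  # ('CharField', 30)
print(django_field_for("integer"))      # ('IntegerField', None)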
import datetime
import decimal
import uuid
from functools import lru_cache
from itertools import chain

from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, NotSupportedError, models
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models.constants import OnConflict
from django.db.models.expressions import Col
from django.utils import timezone
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.functional import cached_property


class DatabaseOperations(BaseDatabaseOperations):
    cast_char_field_without_max_length = "text"
    cast_data_types = {
        "DateField": "TEXT",
        "DateTimeField": "TEXT",
    }
    explain_prefix = "EXPLAIN QUERY PLAN"
    # List of datatypes that cannot be extracted with JSON_EXTRACT() on
    # SQLite. Use JSON_TYPE() instead.
    jsonfield_datatype_values = frozenset(["null", "false", "true"])

    def bulk_batch_size(self, fields, objs):
        """
        SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
        999 variables per query. If there's only a single field to insert,
        the limit is 500 (SQLITE_MAX_COMPOUND_SELECT).
        """
        if len(fields) == 1:
            return 500
        elif len(fields) > 1:
            return self.connection.features.max_query_params // len(fields)
        else:
            return len(objs)

    def check_expression_support(self, expression):
        bad_fields = (models.DateField, models.DateTimeField, models.TimeField)
        bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev)
        if isinstance(expression, bad_aggregates):
            for expr in expression.get_source_expressions():
                try:
                    output_field = expr.output_field
                except (AttributeError, FieldError):
                    # Not every subexpression has an output_field which is
                    # fine to ignore.
                    pass
                else:
                    if isinstance(output_field, bad_fields):
                        raise NotSupportedError(
                            "You cannot use Sum, Avg, StdDev, and Variance "
                            "aggregations on date/time fields in sqlite3 "
                            "since date/time is saved as text."
                        )
        if (
            isinstance(expression, models.Aggregate)
            and expression.distinct
            and len(expression.source_expressions) > 1
        ):
            raise NotSupportedError(
                "SQLite doesn't support DISTINCT on aggregate functions "
                "accepting multiple arguments."
            )

    def date_extract_sql(self, lookup_type, sql, params):
        """
        Support EXTRACT with a user-defined function django_date_extract()
        that's registered in connect(). Use single quotes because this is a
        string and could otherwise cause a collision with a field name.
        """
        return f"django_date_extract(%s, {sql})", (lookup_type.lower(), *params)

    def fetch_returned_insert_rows(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table, return the list of returned data.
        """
        return cursor.fetchall()

    def format_for_duration_arithmetic(self, sql):
        """Do nothing since formatting is handled in the custom function."""
        return sql

    def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
        return f"django_date_trunc(%s, {sql}, %s, %s)", (
            lookup_type.lower(),
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
        return f"django_time_trunc(%s, {sql}, %s, %s)", (
            lookup_type.lower(),
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def _convert_tznames_to_sql(self, tzname):
        if tzname and settings.USE_TZ:
            return tzname, self.connection.timezone_name
        return None, None

    def datetime_cast_date_sql(self, sql, params, tzname):
        return f"django_datetime_cast_date({sql}, %s, %s)", (
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def datetime_cast_time_sql(self, sql, params, tzname):
        return f"django_datetime_cast_time({sql}, %s, %s)", (
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def datetime_extract_sql(self, lookup_type, sql, params, tzname):
        return f"django_datetime_extract(%s, {sql}, %s, %s)", (
            lookup_type.lower(),
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
        return f"django_datetime_trunc(%s, {sql}, %s, %s)", (
            lookup_type.lower(),
            *params,
            *self._convert_tznames_to_sql(tzname),
        )

    def time_extract_sql(self, lookup_type, sql, params):
        return f"django_time_extract(%s, {sql})", (lookup_type.lower(), *params)

    def pk_default_value(self):
        return "NULL"

    def _quote_params_for_last_executed_query(self, params):
        """
        Only for last_executed_query! Don't use this to execute SQL queries!
        """
        # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the
        # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the
        # number of return values, default = 2000). Since Python's sqlite3
        # module doesn't expose the get_limit() C API, assume the default
        # limits are in effect and split the work in batches if needed.
        BATCH_SIZE = 999
        if len(params) > BATCH_SIZE:
            results = ()
            for index in range(0, len(params), BATCH_SIZE):
                chunk = params[index : index + BATCH_SIZE]
                results += self._quote_params_for_last_executed_query(chunk)
            return results

        sql = "SELECT " + ", ".join(["QUOTE(?)"] * len(params))
        # Bypass Django's wrappers and use the underlying sqlite3 connection
        # to avoid logging this query - it would trigger infinite recursion.
        cursor = self.connection.connection.cursor()
        # Native sqlite3 cursors cannot be used as context managers.
        try:
            return cursor.execute(sql, params).fetchone()
        finally:
            cursor.close()

    def last_executed_query(self, cursor, sql, params):
        # Python substitutes parameters in Modules/_sqlite/cursor.c with:
        # bind_parameters(state, self->statement, parameters);
        # Unfortunately there is no way to reach self->statement from Python,
        # so we quote and substitute parameters manually.
        if params:
            if isinstance(params, (list, tuple)):
                params = self._quote_params_for_last_executed_query(params)
            else:
                values = tuple(params.values())
                values = self._quote_params_for_last_executed_query(values)
                params = dict(zip(params, values))
            return sql % params
        # For consistency with SQLiteCursorWrapper.execute(), just return sql
        # when there are no parameters. See #13648 and #17158.
        else:
            return sql

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def no_limit_value(self):
        return -1

    def __references_graph(self, table_name):
        query = """
        WITH tables AS (
            SELECT %s name
            UNION
            SELECT sqlite_master.name
            FROM sqlite_master
            JOIN tables ON (sql REGEXP %s || tables.name || %s)
        ) SELECT name FROM tables;
        """
        params = (
            table_name,
            r'(?i)\s+references\s+("|\')?',
            r'("|\')?\s*\(',
        )
        with self.connection.cursor() as cursor:
            results = cursor.execute(query, params)
            return [row[0] for row in results.fetchall()]

    @cached_property
    def _references_graph(self):
        # 512 is large enough to fit the ~330 tables (as of this writing) in
        # Django's test suite.
        return lru_cache(maxsize=512)(self.__references_graph)

    def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
        if tables and allow_cascade:
            # Simulate TRUNCATE CASCADE by recursively collecting the tables
            # referencing the tables to be flushed.
            tables = set(
                chain.from_iterable(self._references_graph(table) for table in tables)
            )
        sql = [
            "%s %s %s;"
            % (
                style.SQL_KEYWORD("DELETE"),
                style.SQL_KEYWORD("FROM"),
                style.SQL_FIELD(self.quote_name(table)),
            )
            for table in tables
        ]
        if reset_sequences:
            sequences = [{"table": table} for table in tables]
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
        return sql

    def sequence_reset_by_name_sql(self, style, sequences):
        if not sequences:
            return []
        return [
            "%s %s %s %s = 0 %s %s %s (%s);"
            % (
                style.SQL_KEYWORD("UPDATE"),
                style.SQL_TABLE(self.quote_name("sqlite_sequence")),
                style.SQL_KEYWORD("SET"),
                style.SQL_FIELD(self.quote_name("seq")),
                style.SQL_KEYWORD("WHERE"),
                style.SQL_FIELD(self.quote_name("name")),
                style.SQL_KEYWORD("IN"),
                ", ".join(
                    ["'%s'" % sequence_info["table"] for sequence_info in sequences]
                ),
            ),
        ]

    def adapt_datetimefield_value(self, value):
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, "resolve_expression"):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError(
                    "SQLite backend does not support timezone-aware datetimes "
                    "when USE_TZ is False."
                )
        return str(value)

    def adapt_timefield_value(self, value):
        if value is None:
            return None

        # Expression values are adapted by the database.
        if hasattr(value, "resolve_expression"):
            return value

        # SQLite doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            raise ValueError("SQLite backend does not support timezone-aware times.")
        return str(value)

    def get_db_converters(self, expression):
        converters = super().get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == "DateTimeField":
            converters.append(self.convert_datetimefield_value)
        elif internal_type == "DateField":
            converters.append(self.convert_datefield_value)
        elif internal_type == "TimeField":
            converters.append(self.convert_timefield_value)
        elif internal_type == "DecimalField":
            converters.append(self.get_decimalfield_converter(expression))
        elif internal_type == "UUIDField":
            converters.append(self.convert_uuidfield_value)
        elif internal_type == "BooleanField":
            converters.append(self.convert_booleanfield_value)
        return converters

    def convert_datetimefield_value(self, value, expression, connection):
        if value is not None:
            if not isinstance(value, datetime.datetime):
                value = parse_datetime(value)
            if settings.USE_TZ and not timezone.is_aware(value):
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_datefield_value(self, value, expression, connection):
        if value is not None:
            if not isinstance(value, datetime.date):
                value = parse_date(value)
        return value

    def convert_timefield_value(self, value, expression, connection):
        if value is not None:
            if not isinstance(value, datetime.time):
                value = parse_time(value)
        return value

    def get_decimalfield_converter(self, expression):
        # SQLite stores only 15 significant digits. Digits coming from
        # float inaccuracy must be removed.
        create_decimal = decimal.Context(prec=15).create_decimal_from_float
        if isinstance(expression, Col):
            quantize_value = decimal.Decimal(1).scaleb(
                -expression.output_field.decimal_places
            )

            def converter(value, expression, connection):
                if value is not None:
                    return create_decimal(value).quantize(
                        quantize_value, context=expression.output_field.context
                    )

        else:

            def converter(value, expression, connection):
                if value is not None:
                    return create_decimal(value)

        return converter

    def convert_uuidfield_value(self, value, expression, connection):
        if value is not None:
            value = uuid.UUID(value)
        return value

    def convert_booleanfield_value(self, value, expression, connection):
        return bool(value) if value in (1, 0) else value

    def bulk_insert_sql(self, fields, placeholder_rows):
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join(f"({sql})" for sql in placeholder_rows_sql)
        return f"VALUES {values_sql}"

    def combine_expression(self, connector, sub_expressions):
        # SQLite doesn't have a ^ operator, so use the user-defined POWER
        # function that's registered in connect().
        if connector == "^":
            return "POWER(%s)" % ",".join(sub_expressions)
        elif connector == "#":
            return "BITXOR(%s)" % ",".join(sub_expressions)
        return super().combine_expression(connector, sub_expressions)

    def combine_duration_expression(self, connector, sub_expressions):
        if connector not in ["+", "-", "*", "/"]:
            raise DatabaseError("Invalid connector for timedelta: %s." % connector)
        fn_params = ["'%s'" % connector] + sub_expressions
        if len(fn_params) > 3:
            raise ValueError("Too many params for timedelta operations.")
        return "django_format_dtdelta(%s)" % ", ".join(fn_params)

    def integer_field_range(self, internal_type):
        # SQLite doesn't enforce any integer constraints
        return (None, None)

    def subtract_temporals(self, internal_type, lhs, rhs):
        lhs_sql, lhs_params = lhs
        rhs_sql, rhs_params = rhs
        params = (*lhs_params, *rhs_params)
        if internal_type == "TimeField":
            return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), params
        return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), params

    def insert_statement(self, on_conflict=None):
        if on_conflict == OnConflict.IGNORE:
            return "INSERT OR IGNORE INTO"
        return super().insert_statement(on_conflict=on_conflict)

    def return_insert_columns(self, fields):
        # SQLite < 3.35 doesn't support an INSERT...RETURNING statement.
        if not fields:
            return "", ()
        columns = [
            "%s.%s"
            % (
                self.quote_name(field.model._meta.db_table),
                self.quote_name(field.column),
            )
            for field in fields
        ]
        return "RETURNING %s" % ", ".join(columns), ()

    def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
        if (
            on_conflict == OnConflict.UPDATE
            and self.connection.features.supports_update_conflicts_with_target
        ):
            return "ON CONFLICT(%s) DO UPDATE SET %s" % (
                ", ".join(map(self.quote_name, unique_fields)),
                ", ".join(
                    [
                        f"{field} = EXCLUDED.{field}"
                        for field in map(self.quote_name, update_fields)
                    ]
                ),
            )
        return super().on_conflict_suffix_sql(
            fields,
            on_conflict,
            update_fields,
            unique_fields,
        )
repo_name: castiel248/Convert
path: Lib/site-packages/django/db/backends/sqlite3/operations.py
language: Python
license: mit
size: 16961
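_quote_params_for_last_executed_query in the record above splits long parameter lists because it assumes SQLite's default SQLITE_LIMIT_VARIABLE_NUMBER of 999. A hedged standalone sketch of the same batching idea, using the stdlib sqlite3 module and an in-memory database purely for illustration; quote_params is a made-up name, not the method itself:

import sqlite3

BATCH_SIZE = 999  # assumed SQLITE_LIMIT_VARIABLE_NUMBER default

def quote_params(conn, params):
    # Recurse on chunks so no single statement binds more than BATCH_SIZE
    # parameters, mirroring the batching in the method above.
    if len(params) > BATCH_SIZE:
        results = ()
        for i in range(0, len(params), BATCH_SIZE):
            results += quote_params(conn, params[i : i + BATCH_SIZE])
        return results
    sql = "SELECT " + ", ".join(["QUOTE(?)"] * len(params))
    return conn.execute(sql, params).fetchone()

conn = sqlite3.connect(":memory:")
print(quote_params(conn, ["O'Reilly", 42]))  # ("'O''Reilly'", '42')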
import copy
from decimal import Decimal

from django.apps.registry import Apps
from django.db import NotSupportedError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import Statement
from django.db.backends.utils import strip_quotes
from django.db.models import UniqueConstraint
from django.db.transaction import atomic


class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    sql_delete_table = "DROP TABLE %(table)s"
    sql_create_fk = None
    sql_create_inline_fk = (
        "REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
    )
    sql_create_column_inline_fk = sql_create_inline_fk
    sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
    sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
    sql_delete_unique = "DROP INDEX %(name)s"

    def __enter__(self):
        # Some SQLite schema alterations need foreign key constraints to be
        # disabled. Enforce it here for the duration of the schema edit.
        if not self.connection.disable_constraint_checking():
            raise NotSupportedError(
                "SQLite schema editor cannot be used while foreign key "
                "constraint checks are enabled. Make sure to disable them "
                "before entering a transaction.atomic() context because "
                "SQLite does not support disabling them in the middle of "
                "a multi-statement transaction."
            )
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.check_constraints()
        super().__exit__(exc_type, exc_value, traceback)
        self.connection.enable_constraint_checking()

    def quote_value(self, value):
        # The backend "mostly works" without this function and there are use
        # cases for compiling Python without the sqlite3 libraries (e.g.
        # security hardening).
        try:
            import sqlite3

            value = sqlite3.adapt(value)
        except ImportError:
            pass
        except sqlite3.ProgrammingError:
            pass
        # Manual emulation of SQLite parameter quoting
        if isinstance(value, bool):
            return str(int(value))
        elif isinstance(value, (Decimal, float, int)):
            return str(value)
        elif isinstance(value, str):
            return "'%s'" % value.replace("'", "''")
        elif value is None:
            return "NULL"
        elif isinstance(value, (bytes, bytearray, memoryview)):
            # Bytes are only allowed for BLOB fields, encoded as string
            # literals containing hexadecimal data and preceded by a single "X"
            # character.
            return "X'%s'" % value.hex()
        else:
            raise ValueError(
                "Cannot quote parameter value %r of type %s" % (value, type(value))
            )

    def prepare_default(self, value):
        return self.quote_value(value)

    def _is_referenced_by_fk_constraint(
        self, table_name, column_name=None, ignore_self=False
    ):
        """
        Return whether or not the provided table name is referenced by another
        one. If `column_name` is specified, only references pointing to that
        column are considered. If `ignore_self` is True, self-referential
        constraints are ignored.
        """
        with self.connection.cursor() as cursor:
            for other_table in self.connection.introspection.get_table_list(cursor):
                if ignore_self and other_table.name == table_name:
                    continue
                relations = self.connection.introspection.get_relations(
                    cursor, other_table.name
                )
                for constraint_column, constraint_table in relations.values():
                    if constraint_table == table_name and (
                        column_name is None or constraint_column == column_name
                    ):
                        return True
        return False

    def alter_db_table(
        self, model, old_db_table, new_db_table, disable_constraints=True
    ):
        if (
            not self.connection.features.supports_atomic_references_rename
            and disable_constraints
            and self._is_referenced_by_fk_constraint(old_db_table)
        ):
            if self.connection.in_atomic_block:
                raise NotSupportedError(
                    (
                        "Renaming the %r table while in a transaction is not "
                        "supported on SQLite < 3.26 because it would break "
                        "referential integrity. Try adding `atomic = False` "
                        "to the Migration class."
                    )
                    % old_db_table
                )
            self.connection.enable_constraint_checking()
            super().alter_db_table(model, old_db_table, new_db_table)
            self.connection.disable_constraint_checking()
        else:
            super().alter_db_table(model, old_db_table, new_db_table)

    def alter_field(self, model, old_field, new_field, strict=False):
        if not self._field_should_be_altered(old_field, new_field):
            return
        old_field_name = old_field.name
        table_name = model._meta.db_table
        _, old_column_name = old_field.get_attname_column()
        if (
            new_field.name != old_field_name
            and not self.connection.features.supports_atomic_references_rename
            and self._is_referenced_by_fk_constraint(
                table_name, old_column_name, ignore_self=True
            )
        ):
            if self.connection.in_atomic_block:
                raise NotSupportedError(
                    (
                        "Renaming the %r.%r column while in a transaction is "
                        "not supported on SQLite < 3.26 because it would break "
                        "referential integrity. Try adding `atomic = False` "
                        "to the Migration class."
                    )
                    % (model._meta.db_table, old_field_name)
                )
            with atomic(self.connection.alias):
                super().alter_field(model, old_field, new_field, strict=strict)
                # Follow SQLite's documented procedure for performing changes
                # that don't affect the on-disk content.
                # https://sqlite.org/lang_altertable.html#otheralter
                with self.connection.cursor() as cursor:
                    schema_version = cursor.execute(
                        "PRAGMA schema_version"
                    ).fetchone()[0]
                    cursor.execute("PRAGMA writable_schema = 1")
                    references_template = ' REFERENCES "%s" ("%%s") ' % table_name
                    new_column_name = new_field.get_attname_column()[1]
                    search = references_template % old_column_name
                    replacement = references_template % new_column_name
                    cursor.execute(
                        "UPDATE sqlite_master SET sql = replace(sql, %s, %s)",
                        (search, replacement),
                    )
                    cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1))
                    cursor.execute("PRAGMA writable_schema = 0")
                    # The integrity check will raise an exception and rollback
                    # the transaction if the sqlite_master updates corrupt the
                    # database.
                    cursor.execute("PRAGMA integrity_check")
            # Perform a VACUUM to refresh the database representation from
            # the sqlite_master table.
            with self.connection.cursor() as cursor:
                cursor.execute("VACUUM")
        else:
            super().alter_field(model, old_field, new_field, strict=strict)

    def _remake_table(
        self, model, create_field=None, delete_field=None, alter_fields=None
    ):
        """
        Shortcut to transform a model from old_model into new_model

        This follows the correct procedure to perform non-rename or column
        addition operations based on SQLite's documentation

        https://www.sqlite.org/lang_altertable.html#caution

        The essential steps are:
          1. Create a table with the updated definition called "new__app_model"
          2. Copy the data from the existing "app_model" table to the new table
          3. Drop the "app_model" table
          4. Rename the "new__app_model" table to "app_model"
          5. Restore any index of the previous "app_model" table.
        """
        # Self-referential fields must be recreated rather than copied from
        # the old model to ensure their remote_field.field_name doesn't refer
        # to an altered field.
        def is_self_referential(f):
            return f.is_relation and f.remote_field.model is model

        # Work out the new fields dict / mapping
        body = {
            f.name: f.clone() if is_self_referential(f) else f
            for f in model._meta.local_concrete_fields
        }
        # Since mapping might mix column names and default values,
        # its values must be already quoted.
        mapping = {
            f.column: self.quote_name(f.column)
            for f in model._meta.local_concrete_fields
        }
        # This maps field names (not columns) for things like unique_together
        rename_mapping = {}
        # If any of the new or altered fields is introducing a new PK,
        # remove the old one
        restore_pk_field = None
        alter_fields = alter_fields or []
        if getattr(create_field, "primary_key", False) or any(
            getattr(new_field, "primary_key", False)
            for _, new_field in alter_fields
        ):
            for name, field in list(body.items()):
                if field.primary_key and not any(
                    # Do not remove the old primary key when an altered field
                    # that introduces a primary key is the same field.
                    name == new_field.name
                    for _, new_field in alter_fields
                ):
                    field.primary_key = False
                    restore_pk_field = field
                    if field.auto_created:
                        del body[name]
                        del mapping[field.column]
        # Add in any created fields
        if create_field:
            body[create_field.name] = create_field
            # Choose a default and insert it into the copy map
            if not create_field.many_to_many and create_field.concrete:
                mapping[create_field.column] = self.prepare_default(
                    self.effective_default(create_field),
                )
        # Add in any altered fields
        for alter_field in alter_fields:
            old_field, new_field = alter_field
            body.pop(old_field.name, None)
            mapping.pop(old_field.column, None)
            body[new_field.name] = new_field
            if old_field.null and not new_field.null:
                case_sql = "coalesce(%(col)s, %(default)s)" % {
                    "col": self.quote_name(old_field.column),
                    "default": self.prepare_default(self.effective_default(new_field)),
                }
                mapping[new_field.column] = case_sql
            else:
                mapping[new_field.column] = self.quote_name(old_field.column)
            rename_mapping[old_field.name] = new_field.name
        # Remove any deleted fields
        if delete_field:
            del body[delete_field.name]
            del mapping[delete_field.column]
            # Remove any implicit M2M tables
            if (
                delete_field.many_to_many
                and delete_field.remote_field.through._meta.auto_created
            ):
                return self.delete_model(delete_field.remote_field.through)
        # Work inside a new app registry
        apps = Apps()

        # Work out the new value of unique_together, taking renames into
        # account
        unique_together = [
            [rename_mapping.get(n, n) for n in unique]
            for unique in model._meta.unique_together
        ]

        # Work out the new value for index_together, taking renames into
        # account
        index_together = [
            [rename_mapping.get(n, n) for n in index]
            for index in model._meta.index_together
        ]

        indexes = model._meta.indexes
        if delete_field:
            indexes = [
                index for index in indexes if delete_field.name not in index.fields
            ]

        constraints = list(model._meta.constraints)

        # Provide isolated instances of the fields to the new model body so
        # that the existing model's internals aren't interfered with when
        # the dummy model is constructed.
        body_copy = copy.deepcopy(body)

        # Construct a new model with the new fields to allow self referential
        # primary key to resolve to. This model won't ever be materialized as a
        # table and solely exists for foreign key reference resolution purposes.
        # This wouldn't be required if the schema editor was operating on model
        # states instead of rendered models.
        meta_contents = {
            "app_label": model._meta.app_label,
            "db_table": model._meta.db_table,
            "unique_together": unique_together,
            "index_together": index_together,
            "indexes": indexes,
            "constraints": constraints,
            "apps": apps,
        }
        meta = type("Meta", (), meta_contents)
        body_copy["Meta"] = meta
        body_copy["__module__"] = model.__module__
        type(model._meta.object_name, model.__bases__, body_copy)

        # Construct a model with a renamed table name.
        body_copy = copy.deepcopy(body)
        meta_contents = {
            "app_label": model._meta.app_label,
            "db_table": "new__%s" % strip_quotes(model._meta.db_table),
            "unique_together": unique_together,
            "index_together": index_together,
            "indexes": indexes,
            "constraints": constraints,
            "apps": apps,
        }
        meta = type("Meta", (), meta_contents)
        body_copy["Meta"] = meta
        body_copy["__module__"] = model.__module__
        new_model = type("New%s" % model._meta.object_name, model.__bases__, body_copy)

        # Create a new table with the updated schema.
        self.create_model(new_model)

        # Copy data from the old table into the new table
        self.execute(
            "INSERT INTO %s (%s) SELECT %s FROM %s"
            % (
                self.quote_name(new_model._meta.db_table),
                ", ".join(self.quote_name(x) for x in mapping),
                ", ".join(mapping.values()),
                self.quote_name(model._meta.db_table),
            )
        )

        # Delete the old table to make way for the new
        self.delete_model(model, handle_autom2m=False)

        # Rename the new table to take the place of the old
        self.alter_db_table(
            new_model,
            new_model._meta.db_table,
            model._meta.db_table,
            disable_constraints=False,
        )

        # Run deferred SQL on correct table
        for sql in self.deferred_sql:
            self.execute(sql)
        self.deferred_sql = []
        # Fix any PK-removed field
        if restore_pk_field:
            restore_pk_field.primary_key = True

    def delete_model(self, model, handle_autom2m=True):
        if handle_autom2m:
            super().delete_model(model)
        else:
            # Delete the table (and only that)
            self.execute(
                self.sql_delete_table
                % {
                    "table": self.quote_name(model._meta.db_table),
                }
            )
            # Remove all deferred statements referencing the deleted table.
            for sql in list(self.deferred_sql):
                if isinstance(sql, Statement) and sql.references_table(
                    model._meta.db_table
                ):
                    self.deferred_sql.remove(sql)

    def add_field(self, model, field):
        """Create a field on a model."""
        # Special-case implicit M2M tables.
        if field.many_to_many and field.remote_field.through._meta.auto_created:
            self.create_model(field.remote_field.through)
        elif (
            # Primary keys and unique fields are not supported in ALTER TABLE
            # ADD COLUMN.
            field.primary_key
            or field.unique
            or
            # Fields with default values cannot be handled by the ALTER TABLE
            # ADD COLUMN statement because DROP DEFAULT is not supported in
            # ALTER TABLE.
            not field.null
            or self.effective_default(field) is not None
        ):
            self._remake_table(model, create_field=field)
        else:
            super().add_field(model, field)

    def remove_field(self, model, field):
        """
        Remove a field from a model. Usually involves deleting a column, but
        for M2Ms may involve deleting a table.
        """
        # M2M fields are a special case
        if field.many_to_many:
            # For implicit M2M tables, delete the auto-created table
            if field.remote_field.through._meta.auto_created:
                self.delete_model(field.remote_field.through)
            # For explicit "through" M2M fields, do nothing
        elif (
            self.connection.features.can_alter_table_drop_column
            # Primary keys, unique fields, indexed fields, and foreign keys are
            # not supported in ALTER TABLE DROP COLUMN.
            and not field.primary_key
            and not field.unique
            and not field.db_index
            and not (field.remote_field and field.db_constraint)
        ):
            super().remove_field(model, field)
        # For everything else, remake.
        else:
            # It might not actually have a column behind it
            if field.db_parameters(connection=self.connection)["type"] is None:
                return
            self._remake_table(model, delete_field=field)

    def _alter_field(
        self,
        model,
        old_field,
        new_field,
        old_type,
        new_type,
        old_db_params,
        new_db_params,
        strict=False,
    ):
        """Perform a "physical" (non-ManyToMany) field update."""
        # Use "ALTER TABLE ... RENAME COLUMN" if only the column name
        # changed and there aren't any constraints.
        if (
            self.connection.features.can_alter_table_rename_column
            and old_field.column != new_field.column
            and self.column_sql(model, old_field) == self.column_sql(model, new_field)
            and not (
                old_field.remote_field
                and old_field.db_constraint
                or new_field.remote_field
                and new_field.db_constraint
            )
        ):
            return self.execute(
                self._rename_field_sql(
                    model._meta.db_table, old_field, new_field, new_type
                )
            )
        # Alter by remaking table
        self._remake_table(model, alter_fields=[(old_field, new_field)])
        # Rebuild tables with FKs pointing to this field.
        old_collation = old_db_params.get("collation")
        new_collation = new_db_params.get("collation")
        if new_field.unique and (
            old_type != new_type or old_collation != new_collation
        ):
            related_models = set()
            opts = new_field.model._meta
            for remote_field in opts.related_objects:
                # Ignore self-relationship since the table was already rebuilt.
                if remote_field.related_model == model:
                    continue
                if not remote_field.many_to_many:
                    if remote_field.field_name == new_field.name:
                        related_models.add(remote_field.related_model)
                elif new_field.primary_key and remote_field.through._meta.auto_created:
                    related_models.add(remote_field.through)
            if new_field.primary_key:
                for many_to_many in opts.many_to_many:
                    # Ignore self-relationship since the table was already
                    # rebuilt.
                    if many_to_many.related_model == model:
                        continue
                    if many_to_many.remote_field.through._meta.auto_created:
                        related_models.add(many_to_many.remote_field.through)
            for related_model in related_models:
                self._remake_table(related_model)

    def _alter_many_to_many(self, model, old_field, new_field, strict):
        """Alter M2Ms to repoint their to= endpoints."""
        if (
            old_field.remote_field.through._meta.db_table
            == new_field.remote_field.through._meta.db_table
        ):
            # The field name didn't change, but some options did, so we have to
            # propagate this altering.
            self._remake_table(
                old_field.remote_field.through,
                alter_fields=[
                    (
                        # The field that points to the target model is needed,
                        # so that table can be remade with the new m2m field -
                        # this is m2m_reverse_field_name().
                        old_field.remote_field.through._meta.get_field(
                            old_field.m2m_reverse_field_name()
                        ),
                        new_field.remote_field.through._meta.get_field(
                            new_field.m2m_reverse_field_name()
                        ),
                    ),
                    (
                        # The field that points to the model itself is needed,
                        # so that table can be remade with the new self field -
                        # this is m2m_field_name().
                        old_field.remote_field.through._meta.get_field(
                            old_field.m2m_field_name()
                        ),
                        new_field.remote_field.through._meta.get_field(
                            new_field.m2m_field_name()
                        ),
                    ),
                ],
            )
            return

        # Make a new through table
        self.create_model(new_field.remote_field.through)
        # Copy the data across
        self.execute(
            "INSERT INTO %s (%s) SELECT %s FROM %s"
            % (
                self.quote_name(new_field.remote_field.through._meta.db_table),
                ", ".join(
                    [
                        "id",
                        new_field.m2m_column_name(),
                        new_field.m2m_reverse_name(),
                    ]
                ),
                ", ".join(
                    [
                        "id",
                        old_field.m2m_column_name(),
                        old_field.m2m_reverse_name(),
                    ]
                ),
                self.quote_name(old_field.remote_field.through._meta.db_table),
            )
        )
        # Delete the old through table
        self.delete_model(old_field.remote_field.through)

    def add_constraint(self, model, constraint):
        if isinstance(constraint, UniqueConstraint) and (
            constraint.condition
            or constraint.contains_expressions
            or constraint.include
            or constraint.deferrable
        ):
            super().add_constraint(model, constraint)
        else:
            self._remake_table(model)

    def remove_constraint(self, model, constraint):
        if isinstance(constraint, UniqueConstraint) and (
            constraint.condition
            or constraint.contains_expressions
            or constraint.include
            or constraint.deferrable
        ):
            super().remove_constraint(model, constraint)
        else:
            self._remake_table(model)

    def _collate_sql(self, collation):
        return "COLLATE " + collation
repo_name: castiel248/Convert
path: Lib/site-packages/django/db/backends/sqlite3/schema.py
language: Python
license: mit
size: 24430
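quote_value in the schema editor above falls back to manually emulating SQLite literal quoting. A simplified standalone sketch of that emulation, assuming the same type coverage as the method in the record (it is a sketch, not the full backend behavior):

from decimal import Decimal

def quote_value(value):
    # Mirrors the manual-emulation branch of the method above; note bool is
    # checked before int because bool is a subclass of int in Python.
    if isinstance(value, bool):
        return str(int(value))
    if isinstance(value, (Decimal, float, int)):
        return str(value)
    if isinstance(value, str):
        return "'%s'" % value.replace("'", "''")
    if value is None:
        return "NULL"
    if isinstance(value, (bytes, bytearray, memoryview)):
        return "X'%s'" % bytes(value).hex()
    raise ValueError("Cannot quote parameter value %r" % (value,))

assert quote_value(True) == "1"
assert quote_value("it's") == "'it''s'"
assert quote_value(b"\x00\xff") == "X'00ff'"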
import datetime
import decimal
import functools
import logging
import time
from contextlib import contextmanager

from django.db import NotSupportedError
from django.utils.crypto import md5
from django.utils.dateparse import parse_time

logger = logging.getLogger("django.db.backends")


class CursorWrapper:
    def __init__(self, cursor, db):
        self.cursor = cursor
        self.db = db

    WRAP_ERROR_ATTRS = frozenset(["fetchone", "fetchmany", "fetchall", "nextset"])

    def __getattr__(self, attr):
        cursor_attr = getattr(self.cursor, attr)
        if attr in CursorWrapper.WRAP_ERROR_ATTRS:
            return self.db.wrap_database_errors(cursor_attr)
        else:
            return cursor_attr

    def __iter__(self):
        with self.db.wrap_database_errors:
            yield from self.cursor

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Close instead of passing through to avoid backend-specific behavior
        # (#17671). Catch errors liberally because errors in cleanup code
        # aren't useful.
        try:
            self.close()
        except self.db.Database.Error:
            pass

    # The following methods cannot be implemented in __getattr__, because the
    # code must run when the method is invoked, not just when it is accessed.

    def callproc(self, procname, params=None, kparams=None):
        # Keyword parameters for callproc aren't supported in PEP 249, but the
        # database driver may support them (e.g. cx_Oracle).
        if kparams is not None and not self.db.features.supports_callproc_kwargs:
            raise NotSupportedError(
                "Keyword parameters for callproc are not supported on this "
                "database backend."
            )
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None and kparams is None:
                return self.cursor.callproc(procname)
            elif kparams is None:
                return self.cursor.callproc(procname, params)
            else:
                params = params or ()
                return self.cursor.callproc(procname, params, kparams)

    def execute(self, sql, params=None):
        return self._execute_with_wrappers(
            sql, params, many=False, executor=self._execute
        )

    def executemany(self, sql, param_list):
        return self._execute_with_wrappers(
            sql, param_list, many=True, executor=self._executemany
        )

    def _execute_with_wrappers(self, sql, params, many, executor):
        context = {"connection": self.db, "cursor": self}
        for wrapper in reversed(self.db.execute_wrappers):
            executor = functools.partial(wrapper, executor)
        return executor(sql, params, many, context)

    def _execute(self, sql, params, *ignored_wrapper_args):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            if params is None:
                # params default might be backend specific.
                return self.cursor.execute(sql)
            else:
                return self.cursor.execute(sql, params)

    def _executemany(self, sql, param_list, *ignored_wrapper_args):
        self.db.validate_no_broken_transaction()
        with self.db.wrap_database_errors:
            return self.cursor.executemany(sql, param_list)


class CursorDebugWrapper(CursorWrapper):
    # XXX callproc isn't instrumented at this time.

    def execute(self, sql, params=None):
        with self.debug_sql(sql, params, use_last_executed_query=True):
            return super().execute(sql, params)

    def executemany(self, sql, param_list):
        with self.debug_sql(sql, param_list, many=True):
            return super().executemany(sql, param_list)

    @contextmanager
    def debug_sql(
        self, sql=None, params=None, use_last_executed_query=False, many=False
    ):
        start = time.monotonic()
        try:
            yield
        finally:
            stop = time.monotonic()
            duration = stop - start
            if use_last_executed_query:
                sql = self.db.ops.last_executed_query(self.cursor, sql, params)
            try:
                times = len(params) if many else ""
            except TypeError:
                # params could be an iterator.
                times = "?"
            self.db.queries_log.append(
                {
                    "sql": "%s times: %s" % (times, sql) if many else sql,
                    "time": "%.3f" % duration,
                }
            )
            logger.debug(
                "(%.3f) %s; args=%s; alias=%s",
                duration,
                sql,
                params,
                self.db.alias,
                extra={
                    "duration": duration,
                    "sql": sql,
                    "params": params,
                    "alias": self.db.alias,
                },
            )


@contextmanager
def debug_transaction(connection, sql):
    start = time.monotonic()
    try:
        yield
    finally:
        if connection.queries_logged:
            stop = time.monotonic()
            duration = stop - start
            connection.queries_log.append(
                {
                    "sql": "%s" % sql,
                    "time": "%.3f" % duration,
                }
            )
            logger.debug(
                "(%.3f) %s; args=%s; alias=%s",
                duration,
                sql,
                None,
                connection.alias,
                extra={
                    "duration": duration,
                    "sql": sql,
                    "alias": connection.alias,
                },
            )


def split_tzname_delta(tzname):
    """
    Split a time zone name into a 3-tuple of (name, sign, offset).
    """
    for sign in ["+", "-"]:
        if sign in tzname:
            name, offset = tzname.rsplit(sign, 1)
            if offset and parse_time(offset):
                return name, sign, offset
    return tzname, None, None


###############################################
# Converters from database (string) to Python #
###############################################


def typecast_date(s):
    return (
        datetime.date(*map(int, s.split("-"))) if s else None
    )  # return None if s is null


def typecast_time(s):  # does NOT store time zone information
    if not s:
        return None
    hour, minutes, seconds = s.split(":")
    if "." in seconds:  # check whether seconds have a fractional part
        seconds, microseconds = seconds.split(".")
    else:
        microseconds = "0"
    return datetime.time(
        int(hour), int(minutes), int(seconds), int((microseconds + "000000")[:6])
    )


def typecast_timestamp(s):  # does NOT store time zone information
    # "2005-07-29 15:48:00.590358-05"
    # "2005-07-29 09:56:00-05"
    if not s:
        return None
    if " " not in s:
        return typecast_date(s)
    d, t = s.split()
    # Remove timezone information.
    if "-" in t:
        t, _ = t.split("-", 1)
    elif "+" in t:
        t, _ = t.split("+", 1)
    dates = d.split("-")
    times = t.split(":")
    seconds = times[2]
    if "." in seconds:  # check whether seconds have a fractional part
        seconds, microseconds = seconds.split(".")
    else:
        microseconds = "0"
    return datetime.datetime(
        int(dates[0]),
        int(dates[1]),
        int(dates[2]),
        int(times[0]),
        int(times[1]),
        int(seconds),
        int((microseconds + "000000")[:6]),
    )


###############################################
# Converters from Python to database (string) #
###############################################


def split_identifier(identifier):
    """
    Split an SQL identifier into a two element tuple of (namespace, name).

    The identifier could be a table, column, or sequence name that might be
    prefixed by a namespace.
    """
    try:
        namespace, name = identifier.split('"."')
    except ValueError:
        namespace, name = "", identifier
    return namespace.strip('"'), name.strip('"')


def truncate_name(identifier, length=None, hash_len=4):
    """
    Shorten an SQL identifier to a repeatable mangled version with the given
    length.

    If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE,
    truncate the table portion only.
    """
    namespace, name = split_identifier(identifier)

    if length is None or len(name) <= length:
        return identifier

    digest = names_digest(name, length=hash_len)
    return "%s%s%s" % (
        '%s"."' % namespace if namespace else "",
        name[: length - hash_len],
        digest,
    )


def names_digest(*args, length):
    """
    Generate a 32-bit digest of a set of arguments that can be used to
    shorten identifying names.
    """
    h = md5(usedforsecurity=False)
    for arg in args:
        h.update(arg.encode())
    return h.hexdigest()[:length]


def format_number(value, max_digits, decimal_places):
    """
    Format a number into a string with the requisite number of digits and
    decimal places.
    """
    if value is None:
        return None
    context = decimal.getcontext().copy()
    if max_digits is not None:
        context.prec = max_digits
    if decimal_places is not None:
        value = value.quantize(
            decimal.Decimal(1).scaleb(-decimal_places), context=context
        )
    else:
        context.traps[decimal.Rounded] = 1
        value = context.create_decimal(value)
    return "{:f}".format(value)


def strip_quotes(table_name):
    """
    Strip quotes off of quoted table names to make them safe for use in index
    names, sequence names, etc.

    For example '"USER"."TABLE"' (an Oracle naming scheme) becomes
    'USER"."TABLE'.
    """
    has_quotes = table_name.startswith('"') and table_name.endswith('"')
    return table_name[1:-1] if has_quotes else table_name
repo_name: castiel248/Convert
path: Lib/site-packages/django/db/backends/utils.py
language: Python
license: mit
size: 10002
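Since this record's path is django/db/backends/utils.py, its helpers should be importable from django.db.backends.utils when Django is installed. A short usage sketch of truncate_name under that assumption; the identifier and length here are made up:

from django.db.backends.utils import truncate_name

# A 25-character name truncated to 20: the first 16 characters of the name
# plus a 4-character md5 digest, so the result is deterministic.
print(truncate_name("some_very_long_table_name", length=20))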
from .migration import Migration, swappable_dependency  # NOQA
from .operations import *  # NOQA
repo_name: castiel248/Convert
path: Lib/site-packages/django/db/migrations/__init__.py
language: Python
license: mit
size: 97
import functools import re from collections import defaultdict from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.utils import ( COMPILED_REGEX_TYPE, RegexObject, resolve_relation, ) from django.utils.topological_sort import stable_topological_sort class MigrationAutodetector: """ Take a pair of ProjectStates and compare them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() self.existing_apps = {app for app, model in from_state.models} def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of applicable changes. Take a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. """ if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return ( obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords), ) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, "deconstruct"): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as the related fields change during renames). 
""" fields_def = [] for name, field in sorted(fields.items()): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: deconstruction[2].pop("to", None) fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Return a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. # Then go through that list, order it, and split into migrations to # resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} self.altered_indexes = {} self.altered_constraints = {} self.renamed_fields = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_model_keys = set() self.old_proxy_keys = set() self.old_unmanaged_keys = set() self.new_model_keys = set() self.new_proxy_keys = set() self.new_unmanaged_keys = set() for (app_label, model_name), model_state in self.from_state.models.items(): if not model_state.options.get("managed", True): self.old_unmanaged_keys.add((app_label, model_name)) elif app_label not in self.from_state.real_apps: if model_state.options.get("proxy"): self.old_proxy_keys.add((app_label, model_name)) else: self.old_model_keys.add((app_label, model_name)) for (app_label, model_name), model_state in self.to_state.models.items(): if not model_state.options.get("managed", True): self.new_unmanaged_keys.add((app_label, model_name)) elif app_label not in self.from_state.real_apps or ( convert_apps and app_label in convert_apps ): if model_state.options.get("proxy"): self.new_proxy_keys.add((app_label, model_name)) else: self.new_model_keys.add((app_label, model_name)) self.from_state.resolve_fields_and_relations() self.to_state.resolve_fields_and_relations() # Renames have to come first self.generate_renamed_models() # Prepare lists of fields and generate through model map self._prepare_field_lists() self._generate_through_model_map() # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() self.generate_altered_db_table_comment() # Create the renamed fields and store them in self.renamed_fields. # They are used by create_altered_indexes(), generate_altered_fields(), # generate_removed_altered_index/unique_together(), and # generate_altered_index/unique_together(). self.create_renamed_fields() # Create the altered indexes and store them in self.altered_indexes. # This avoids the same computation in generate_removed_indexes() # and generate_added_indexes(). self.create_altered_indexes() self.create_altered_constraints() # Generate index removal operations before field is removed self.generate_removed_constraints() self.generate_removed_indexes() # Generate field renaming operations. self.generate_renamed_fields() self.generate_renamed_indexes() # Generate removal of foo together. 
self.generate_removed_altered_unique_together() self.generate_removed_altered_index_together() # RemovedInDjango51Warning. # Generate field operations. self.generate_removed_fields() self.generate_added_fields() self.generate_altered_fields() self.generate_altered_order_with_respect_to() self.generate_altered_unique_together() self.generate_altered_index_together() # RemovedInDjango51Warning. self.generate_added_indexes() self.generate_added_constraints() self.generate_altered_db_table() self._sort_migrations() self._build_migration_list(graph) self._optimize_migrations() return self.migrations def _prepare_field_lists(self): """ Prepare field lists and a list of the fields that used through models in the old state so dependencies can be made from the through model deletion to the field that uses it. """ self.kept_model_keys = self.old_model_keys & self.new_model_keys self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys self.through_users = {} self.old_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.from_state.models[ app_label, self.renamed_models.get((app_label, model_name), model_name) ].fields } self.new_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.to_state.models[app_label, model_name].fields } def _generate_through_model_map(self): """Through model map generation.""" for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields.items(): if hasattr(field, "remote_field") and getattr( field.remote_field, "through", None ): through_key = resolve_relation( field.remote_field.through, app_label, model_name ) self.through_users[through_key] = ( app_label, old_model_name, field_name, ) @staticmethod def _resolve_dependency(dependency): """ Return the resolved dependency and a boolean denoting whether or not it was swappable. """ if dependency[0] != "__setting__": return dependency, False resolved_app_label, resolved_object_name = getattr( settings, dependency[1] ).split(".") return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True def _build_migration_list(self, graph=None): """ Chop the lists of operations up into migrations with dependencies on each other. Do this by going through an app's list of operations until one is found that has an outgoing dependency that isn't in another app's migration yet (hasn't been chopped off its list). Then chop off the operations before it into a migration and move onto the next app. If the loop completes without doing anything, there's a circular dependency (which _should_ be impossible as the operations are all split at this point so they can't depend and be depended on). """ self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: # Temporarily resolve the swappable dependency to # prevent circular references. While keeping the # dependency checks on the resolved model, add the # swappable dependencies. original_dep = dep dep, is_swappable_dep = self._resolve_dependency(dep) if dep[0] != app_label: # External app dependency. See if it's not yet # satisfied. for other_operation in self.generated_operations.get( dep[0], [] ): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add( (original_dep[0], original_dep[1]) ) elif dep[0] in self.migrations: operation_dependencies.add( (dep[0], self.migrations[dep[0]][-1].name) ) else: # If we can't find the other app, we add a # first/last dependency, but only if we've # already been through once and checked # everything. if chop_mode: # If the app already exists, we add a # dependency on the last migration, as # we don't know which migration # contains the target field. If it's # not yet migrated or has no # migrations, we use __first__. if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add( graph.leaf_nodes(dep[0])[0] ) else: operation_dependencies.add( (dep[0], "__first__") ) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) del self.generated_operations[app_label][0] else: break # Make a migration! Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type( "Migration", (Migration,), {"operations": [], "dependencies": []}, ) instance = subclass( "auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label, ) instance.dependencies = list(dependencies) instance.operations = chopped instance.initial = app_label not in self.existing_apps self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = ( chopped + self.generated_operations[app_label] ) new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError( "Cannot resolve operation dependencies: %r" % self.generated_operations ) num_ops = new_num_ops def _sort_migrations(self): """ Reorder to make things possible. Reordering may be needed so FKs work nicely inside the same app. """ for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: # Resolve intra-app dependencies to handle circular # references involving a swappable model. 
dep = self._resolve_dependency(dep)[0] if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort( ops, dependency_graph ) def _optimize_migrations(self): # Add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for migrations in self.migrations.values(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in migrations: migration.operations = MigrationOptimizer().optimize( migration.operations, app_label ) def check_dependency(self, operation, dependency): """ Return True if the given operation depends on the given dependency, False otherwise. """ # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance( operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether), ) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency,)) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are # (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Place potential swappable models first in lists of created models (only real way to solve #22783). 
""" try: model_state = self.to_state.models[item] base_names = { base if isinstance(base, str) else base.__name__ for base in model_state.bases } string_version = "%s.%s" % (item[0], item[1]) if ( model_state.options.get("swappable") or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Find any renamed models, generate the operations for them, and remove the old entry from the model lists. Must be run before other model-level generation. """ self.renamed_models = {} self.renamed_models_rel = {} added_models = self.new_model_keys - self.old_model_keys for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = self.old_model_keys - self.new_model_keys for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[ rem_app_label, rem_model_name ] rem_model_fields_def = self.only_relation_agnostic_fields( rem_model_state.fields ) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model( rem_model_state, model_state ): dependencies = [] fields = list(model_state.fields.values()) + [ field.remote_field for relations in self.to_state.relations[ app_label, model_name ].values() for field in relations.values() ] for field in fields: if field.is_relation: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ), dependencies=dependencies, ) self.renamed_models[app_label, model_name] = rem_model_name renamed_models_rel_key = "%s.%s" % ( rem_model_state.app_label, rem_model_state.name_lower, ) self.renamed_models_rel[ renamed_models_rel_key ] = "%s.%s" % ( model_state.app_label, model_state.name_lower, ) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.add((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (these are optimized later, if possible). Defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). """ old_keys = self.old_model_keys | self.old_unmanaged_keys added_models = self.new_model_keys - old_keys added_unmanaged_models = self.new_unmanaged_keys - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True), ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] # Gather related fields related_fields = {} primary_key_rel = None for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field_name] = field if getattr(field.remote_field, "through", None): related_fields[field_name] = field # Are there indexes/unique|index_together to defer? 
indexes = model_state.options.pop("indexes") constraints = model_state.options.pop("constraints") unique_together = model_state.options.pop("unique_together", None) # RemovedInDjango51Warning. index_together = model_state.options.pop("index_together", None) order_with_respect_to = model_state.options.pop( "order_with_respect_to", None ) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the removal of base fields if the new model has # a field with the same name. old_base_model_state = self.from_state.models.get( (base_app_label, base_name) ) new_base_model_state = self.to_state.models.get( (base_app_label, base_name) ) if old_base_model_state and new_base_model_state: removed_base_fields = ( set(old_base_model_state.fields) .difference( new_base_model_state.fields, ) .intersection(model_state.fields) ) for removed_base_field in removed_base_fields: dependencies.append( (base_app_label, base_name, removed_base_field, False) ) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append( resolve_relation( primary_key_rel, app_label, model_name, ) + (None, True) ) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[ d for d in model_state.fields.items() if d[0] not in related_fields ], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_state.options.get("managed", True): continue # Generate operations for each related field for name, field in sorted(related_fields.items()): dependencies = self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ], ) related_dependencies = [ (app_label, model_name, name, True) for name in sorted(related_fields) ] related_dependencies.append((app_label, model_name, None, True)) for index in indexes: self.add_operation( app_label, operations.AddIndex( model_name=model_name, index=index, ), dependencies=related_dependencies, ) for constraint in constraints: self.add_operation( app_label, operations.AddConstraint( model_name=model_name, constraint=constraint, ), dependencies=related_dependencies, ) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=unique_together, ), dependencies=related_dependencies, ) # RemovedInDjango51Warning. if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies, ) # Fix relationships if the model changed from a proxy model to a # concrete model. 
relations = self.to_state.relations if (app_label, model_name) in self.old_proxy_keys: for related_model_key, related_fields in relations[ app_label, model_name ].items(): related_model_state = self.to_state.models[related_model_key] for related_field_name, related_field in related_fields.items(): self.add_operation( related_model_state.app_label, operations.AlterField( model_name=related_model_state.name, name=related_field_name, field=related_field, ), dependencies=[(app_label, model_name, None, True)], ) def generate_created_proxies(self): """ Make CreateModel statements for proxy models. Use the same statements as that way there's less code duplication, but for proxy models it's safe to skip all the pointless field stuff and chuck out an operation. """ added = self.new_proxy_keys - self.old_proxy_keys for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (these are optimized later, if possible). Also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). """ new_keys = self.new_model_keys | self.new_unmanaged_keys deleted_models = self.old_model_keys - new_keys deleted_unmanaged_models = self.old_unmanaged_keys - new_keys all_deleted_models = chain( sorted(deleted_models), sorted(deleted_unmanaged_models) ) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] # Gather related fields related_fields = {} for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: related_fields[field_name] = field if getattr(field.remote_field, "through", None): related_fields[field_name] = field # Generate option removal first unique_together = model_state.options.pop("unique_together", None) # RemovedInDjango51Warning. index_together = model_state.options.pop("index_together", None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ), ) # RemovedInDjango51Warning. if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ), ) # Then remove each related field for name in sorted(related_fields): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ), ) # Finally, remove the model. # This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. 
dependencies = [] relations = self.from_state.relations for ( related_object_app_label, object_name, ), relation_related_fields in relations[app_label, model_name].items(): for field_name, field in relation_related_fields.items(): dependencies.append( (related_object_app_label, object_name, field_name, False), ) if not field.many_to_many: dependencies.append( ( related_object_app_label, object_name, field_name, "alter", ), ) for name in sorted(related_fields): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append( (through_user[0], through_user[1], through_user[2], False) ) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """Make DeleteModel operations for proxy models.""" deleted = self.old_proxy_keys - self.new_proxy_keys for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def create_renamed_fields(self): """Work out renamed fields.""" self.renamed_operations = [] old_field_keys = self.old_field_keys.copy() for app_label, model_name, field_name in sorted( self.new_field_keys - old_field_keys ): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] field = new_model_state.get_field(field_name) # Scan to see if this is actually a rename! field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted( old_field_keys - self.new_field_keys ): if rem_app_label == app_label and rem_model_name == model_name: old_field = old_model_state.get_field(rem_field_name) old_field_dec = self.deep_deconstruct(old_field) if ( field.remote_field and field.remote_field.model and "to" in old_field_dec[2] ): old_rel_to = old_field_dec[2]["to"] if old_rel_to in self.renamed_models_rel: old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to] old_field.set_attributes_from_name(rem_field_name) old_db_column = old_field.get_attname_column()[1] if old_field_dec == field_dec or ( # Was the field renamed and db_column equal to the # old field's column added? old_field_dec[0:2] == field_dec[0:2] and dict(old_field_dec[2], db_column=old_db_column) == field_dec[2] ): if self.questioner.ask_rename( model_name, rem_field_name, field_name, field ): self.renamed_operations.append( ( rem_app_label, rem_model_name, old_field.db_column, rem_field_name, app_label, model_name, field, field_name, ) ) old_field_keys.remove( (rem_app_label, rem_model_name, rem_field_name) ) old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[ app_label, model_name, field_name ] = rem_field_name break def generate_renamed_fields(self): """Generate RenameField operations.""" for ( rem_app_label, rem_model_name, rem_db_column, rem_field_name, app_label, model_name, field, field_name, ) in self.renamed_operations: # A db_column mismatch requires a prior noop AlterField for the # subsequent RenameField to be a noop on attempts at preserving the # old name.
if rem_db_column != field.db_column: altered_field = field.clone() altered_field.name = rem_field_name self.add_operation( app_label, operations.AlterField( model_name=model_name, name=rem_field_name, field=altered_field, ), ) self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ), ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) def generate_added_fields(self): """Make AddField operations.""" for app_label, model_name, field_name in sorted( self.new_field_keys - self.old_field_keys ): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.to_state.models[app_label, model_name].get_field(field_name) # Adding a field always depends at least on its removal. dependencies = [(app_label, model_name, field_name, False)] # Fields that are foreignkeys/m2ms depend on stuff. if field.remote_field and field.remote_field.model: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. time_fields = (models.DateField, models.DateTimeField, models.TimeField) preserve_default = ( field.null or field.has_default() or field.many_to_many or (field.blank and field.empty_strings_allowed) or (isinstance(field, time_fields) and field.auto_now) ) if not preserve_default: field = field.clone() if isinstance(field, time_fields) and field.auto_now_add: field.default = self.questioner.ask_auto_now_add_addition( field_name, model_name ) else: field.default = self.questioner.ask_not_null_addition( field_name, model_name ) if ( field.unique and field.default is not models.NOT_PROVIDED and callable(field.default) ): self.questioner.ask_unique_callable_default_addition(field_name, model_name) self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """Make RemoveField operations.""" for app_label, model_name, field_name in sorted( self.old_field_keys - self.new_field_keys ): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Make AlterField operations, or possibly RemovedField/AddField if alter isn't possible. """ for app_label, model_name, field_name in sorted( self.old_field_keys & self.new_field_keys ): # Did the field change? 
old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_field_name = self.renamed_fields.get( (app_label, model_name, field_name), field_name ) old_field = self.from_state.models[app_label, old_model_name].get_field( old_field_name ) new_field = self.to_state.models[app_label, model_name].get_field( field_name ) dependencies = [] # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr( new_field.remote_field, "model", None ): rename_key = resolve_relation( new_field.remote_field.model, app_label, model_name ) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model # Handle ForeignKey which can only have a single to_field. remote_field_name = getattr(new_field.remote_field, "field_name", None) if remote_field_name: to_field_rename_key = rename_key + (remote_field_name,) if to_field_rename_key in self.renamed_fields: # Repoint both model and field name because to_field # inclusion in ForeignKey.deconstruct() is based on # both. new_field.remote_field.model = old_field.remote_field.model new_field.remote_field.field_name = ( old_field.remote_field.field_name ) # Handle ForeignObjects which can have multiple from_fields/to_fields. from_fields = getattr(new_field, "from_fields", None) if from_fields: from_rename_key = (app_label, model_name) new_field.from_fields = tuple( [ self.renamed_fields.get( from_rename_key + (from_field,), from_field ) for from_field in from_fields ] ) new_field.to_fields = tuple( [ self.renamed_fields.get(rename_key + (to_field,), to_field) for to_field in new_field.to_fields ] ) dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, new_field, self.to_state, ) ) if hasattr(new_field, "remote_field") and getattr( new_field.remote_field, "through", None ): rename_key = resolve_relation( new_field.remote_field.through, app_label, model_name ) if rename_key in self.renamed_models: new_field.remote_field.through = old_field.remote_field.through old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) # If the field was confirmed to be renamed it means that only # db_column was allowed to change which generate_renamed_fields() # already accounts for by adding an AlterField operation. 
if old_field_dec != new_field_dec and old_field_name == field_name: both_m2m = old_field.many_to_many and new_field.many_to_many neither_m2m = not old_field.many_to_many and not new_field.many_to_many if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if ( old_field.null and not new_field.null and not new_field.has_default() and not new_field.many_to_many ): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration( field_name, model_name ) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def create_altered_indexes(self): option_name = operations.AddIndex.option_name self.renamed_index_together_values = defaultdict(list) for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_indexes = old_model_state.options[option_name] new_indexes = new_model_state.options[option_name] added_indexes = [idx for idx in new_indexes if idx not in old_indexes] removed_indexes = [idx for idx in old_indexes if idx not in new_indexes] renamed_indexes = [] # Find renamed indexes. remove_from_added = [] remove_from_removed = [] for new_index in added_indexes: new_index_dec = new_index.deconstruct() new_index_name = new_index_dec[2].pop("name") for old_index in removed_indexes: old_index_dec = old_index.deconstruct() old_index_name = old_index_dec[2].pop("name") # Indexes are the same except for the names. if ( new_index_dec == old_index_dec and new_index_name != old_index_name ): renamed_indexes.append((old_index_name, new_index_name, None)) remove_from_added.append(new_index) remove_from_removed.append(old_index) # Find index_together changed to indexes. for ( old_value, new_value, index_together_app_label, index_together_model_name, dependencies, ) in self._get_altered_foo_together_operations( operations.AlterIndexTogether.option_name ): if ( app_label != index_together_app_label or model_name != index_together_model_name ): continue removed_values = old_value.difference(new_value) for removed_index_together in removed_values: renamed_index_together_indexes = [] for new_index in added_indexes: _, args, kwargs = new_index.deconstruct() # Ensure only 'fields' are defined in the Index. if ( not args and new_index.fields == list(removed_index_together) and set(kwargs) == {"name", "fields"} ): renamed_index_together_indexes.append(new_index) if len(renamed_index_together_indexes) == 1: renamed_index = renamed_index_together_indexes[0] remove_from_added.append(renamed_index) renamed_indexes.append( (None, renamed_index.name, removed_index_together) ) self.renamed_index_together_values[ index_together_app_label, index_together_model_name ].append(removed_index_together) # Remove renamed indexes from the lists of added and removed # indexes. 
added_indexes = [ idx for idx in added_indexes if idx not in remove_from_added ] removed_indexes = [ idx for idx in removed_indexes if idx not in remove_from_removed ] self.altered_indexes.update( { (app_label, model_name): { "added_indexes": added_indexes, "removed_indexes": removed_indexes, "renamed_indexes": renamed_indexes, } } ) def generate_added_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for index in alt_indexes["added_indexes"]: self.add_operation( app_label, operations.AddIndex( model_name=model_name, index=index, ), ) def generate_removed_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for index in alt_indexes["removed_indexes"]: self.add_operation( app_label, operations.RemoveIndex( model_name=model_name, name=index.name, ), ) def generate_renamed_indexes(self): for (app_label, model_name), alt_indexes in self.altered_indexes.items(): for old_index_name, new_index_name, old_fields in alt_indexes[ "renamed_indexes" ]: self.add_operation( app_label, operations.RenameIndex( model_name=model_name, new_name=new_index_name, old_name=old_index_name, old_fields=old_fields, ), ) def create_altered_constraints(self): option_name = operations.AddConstraint.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_constraints = old_model_state.options[option_name] new_constraints = new_model_state.options[option_name] add_constraints = [c for c in new_constraints if c not in old_constraints] rem_constraints = [c for c in old_constraints if c not in new_constraints] self.altered_constraints.update( { (app_label, model_name): { "added_constraints": add_constraints, "removed_constraints": rem_constraints, } } ) def generate_added_constraints(self): for ( app_label, model_name, ), alt_constraints in self.altered_constraints.items(): for constraint in alt_constraints["added_constraints"]: self.add_operation( app_label, operations.AddConstraint( model_name=model_name, constraint=constraint, ), ) def generate_removed_constraints(self): for ( app_label, model_name, ), alt_constraints in self.altered_constraints.items(): for constraint in alt_constraints["removed_constraints"]: self.add_operation( app_label, operations.RemoveConstraint( model_name=model_name, name=constraint.name, ), ) @staticmethod def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state): remote_field_model = None if hasattr(field.remote_field, "model"): remote_field_model = field.remote_field.model else: relations = project_state.relations[app_label, model_name] for (remote_app_label, remote_model_name), fields in relations.items(): if any( field == related_field.remote_field for related_field in fields.values() ): remote_field_model = f"{remote_app_label}.{remote_model_name}" break # Account for FKs to swappable models swappable_setting = getattr(field, "swappable_setting", None) if swappable_setting is not None: dep_app_label = "__setting__" dep_object_name = swappable_setting else: dep_app_label, dep_object_name = resolve_relation( remote_field_model, app_label, model_name, ) dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None): through_app_label, through_object_name = resolve_relation( field.remote_field.through, app_label, model_name, ) 
dependencies.append((through_app_label, through_object_name, None, True)) return dependencies def _get_altered_foo_together_operations(self, option_name): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) old_value = ( { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } if old_value else set() ) new_value = new_model_state.options.get(option_name) new_value = set(new_value) if new_value else set() if old_value != new_value: dependencies = [] for foo_togethers in new_value: for field_name in foo_togethers: field = new_model_state.get_field(field_name) if field.remote_field and field.remote_field.model: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) yield ( old_value, new_value, app_label, model_name, dependencies, ) def _generate_removed_altered_foo_together(self, operation): for ( old_value, new_value, app_label, model_name, dependencies, ) in self._get_altered_foo_together_operations(operation.option_name): if operation == operations.AlterIndexTogether: old_value = { value for value in old_value if value not in self.renamed_index_together_values[app_label, model_name] } removal_value = new_value.intersection(old_value) if removal_value or old_value: self.add_operation( app_label, operation( name=model_name, **{operation.option_name: removal_value} ), dependencies=dependencies, ) def generate_removed_altered_unique_together(self): self._generate_removed_altered_foo_together(operations.AlterUniqueTogether) # RemovedInDjango51Warning. def generate_removed_altered_index_together(self): self._generate_removed_altered_foo_together(operations.AlterIndexTogether) def _generate_altered_foo_together(self, operation): for ( old_value, new_value, app_label, model_name, dependencies, ) in self._get_altered_foo_together_operations(operation.option_name): removal_value = new_value.intersection(old_value) if new_value != removal_value: self.add_operation( app_label, operation(name=model_name, **{operation.option_name: new_value}), dependencies=dependencies, ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) # RemovedInDjango51Warning. 
def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get("db_table") new_db_table_name = new_model_state.options.get("db_table") if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ), ) def generate_altered_db_table_comment(self): models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_comment = old_model_state.options.get("db_table_comment") new_db_table_comment = new_model_state.options.get("db_table_comment") if old_db_table_comment != new_db_table_comment: self.add_operation( app_label, operations.AlterModelTableComment( name=model_name, table_comment=new_db_table_comment, ), ) def generate_altered_options(self): """ Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them). """ models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ), ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.options.get( "order_with_respect_to" ) != new_model_state.options.get("order_with_respect_to"): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append( ( app_label, model_name, new_model_state.options["order_with_respect_to"], True, ) ) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get( "order_with_respect_to" ), ), dependencies=dependencies, ) def 
generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get( (app_label, model_name), model_name ) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ), ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Take a result from changes() and a MigrationGraph, and fix the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. """ leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) new_name_parts = ["%04i" % next_number] if migration_name: new_name_parts.append(migration_name) elif i == 0 and not app_leaf: new_name_parts.append("initial") else: new_name_parts.append(migration.suggest_name()[:100]) new_name = "_".join(new_name_parts) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for migrations in changes.values(): for migration in migrations: migration.dependencies = [ name_map.get(d, d) for d in migration.dependencies ] return changes def _trim_to_apps(self, changes, app_labels): """ Take changes from arrange_for_graph() and set of app labels, and return a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present as they may be required dependencies. """ # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) required_apps.update( *[app_dependencies.get(app_label, ()) for app_label in required_apps] ) # Remove all migrations that aren't needed for app_label in list(changes): if app_label not in required_apps: del changes[app_label] return changes @classmethod def parse_number(cls, name): """ Given a migration name, try to extract a number from the beginning of it. For a squashed migration such as '0001_squashed_0004…', return the second number. If no number is found, return None. """ if squashed_match := re.search(r".*_squashed_(\d+)", name): return int(squashed_match[1]) match = re.match(r"^\d+", name) if match: return int(match[0]) return None
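# --- Editorial sketch (not part of the upstream module) ---------------------
# A minimal example of driving MigrationAutodetector by hand, mirroring what
# the `makemigrations` command does. It assumes django.setup() has already run
# for a configured project; the helper name `_example_autodetect_changes` is
# illustrative, but the Django APIs used inside it are real.
def _example_autodetect_changes():
    from django.apps import apps
    from django.db.migrations.loader import MigrationLoader
    from django.db.migrations.questioner import NonInteractiveMigrationQuestioner
    from django.db.migrations.state import ProjectState

    # from_state: the world as described by the migrations already on disk.
    loader = MigrationLoader(None, ignore_no_migrations=True)
    autodetector = MigrationAutodetector(
        loader.project_state(),
        # to_state: the world as described by the current model classes.
        ProjectState.from_apps(apps),
        NonInteractiveMigrationQuestioner(),
    )
    # changes() arranges migration names and dependencies against the loaded
    # graph and returns {app_label: [Migration, ...]}.
    return autodetector.changes(graph=loader.graph)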
castiel248/Convert
Lib/site-packages/django/db/migrations/autodetector.py
Python
mit
78,977
from django.db import DatabaseError class AmbiguityError(Exception): """More than one migration matches a name prefix.""" pass class BadMigrationError(Exception): """There's a bad migration (unreadable/bad format/etc.).""" pass class CircularDependencyError(Exception): """There's an impossible-to-resolve circular dependency.""" pass class InconsistentMigrationHistory(Exception): """An applied migration has some of its dependencies not applied.""" pass class InvalidBasesError(ValueError): """A model's base classes can't be resolved.""" pass class IrreversibleError(RuntimeError): """An irreversible migration is about to be reversed.""" pass class NodeNotFoundError(LookupError): """An attempt was made to use a node that is not available in the graph.""" def __init__(self, message, node, origin=None): self.message = message self.origin = origin self.node = node def __str__(self): return self.message def __repr__(self): return "NodeNotFoundError(%r)" % (self.node,) class MigrationSchemaMissing(DatabaseError): pass class InvalidMigrationPlan(ValueError): pass
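# --- Editorial sketch (not part of the upstream module) ---------------------
# How callers typically meet these exceptions: resolving a user-supplied
# migration name prefix. MigrationLoader.get_migration_by_prefix() raises
# AmbiguityError when the prefix matches more than one migration and KeyError
# when it matches none. The helper name `_example_resolve_migration` is
# illustrative; `loader` is assumed to be a MigrationLoader instance.
def _example_resolve_migration(loader, app_label, prefix):
    try:
        return loader.get_migration_by_prefix(app_label, prefix)
    except AmbiguityError:
        raise SystemExit(
            "More than one migration in %r starts with %r" % (app_label, prefix)
        )
    except KeyError:
        raise SystemExit(
            "No migration in %r starts with %r" % (app_label, prefix)
        )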
castiel248/Convert
Lib/site-packages/django/db/migrations/exceptions.py
Python
mit
1,204
from django.apps.registry import apps as global_apps from django.db import migrations, router from .exceptions import InvalidMigrationPlan from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor: """ End-to-end migration execution - load migrations and run them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, return a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = {} else: applied = dict(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # If the target is missing, it's likely a replaced migration. # Reload the graph without replacements. if ( self.loader.replace_migrations and target not in self.loader.graph.node_map ): self.loader.replace_migrations = False self.loader.build_graph() return self.migration_plan(targets, clean_start=clean_start) # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied[migration] = self.loader.graph.nodes[migration] return plan def _create_project_state(self, with_applied_migrations=False): """ Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. """ state = ProjectState(real_apps=self.loader.unmigrated_apps) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): """ Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ # The django_migrations table must be present to record applied # migrations, but don't create it if there are no migrations to apply. 
if plan == []: if not self.recorder.has_table(): return self._create_project_state(with_applied_migrations=False) else: self.recorder.ensure_schema() if plan is None: plan = self.migration_plan(targets) # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan( self.loader.graph.leaf_nodes(), clean_start=True ) all_forwards = all(not backwards for mig, backwards in plan) all_backwards = all(backwards for mig, backwards in plan) if not plan: if state is None: # The resulting state should include applied migrations. state = self._create_project_state(with_applied_migrations=True) elif all_forwards == all_backwards: # This should only happen if there's a mixed plan raise InvalidMigrationPlan( "Migration plans with both forwards and backwards migrations " "are not supported. Please split your migration process into " "separate plans of only forwards OR backwards migrations.", plan, ) elif all_forwards: if state is None: # The resulting state should still include applied migrations. state = self._create_project_state(with_applied_migrations=True) state = self._migrate_all_forwards( state, plan, full_plan, fake=fake, fake_initial=fake_initial ) else: # No need to check for `elif all_backwards` here, as that condition # would always evaluate to true. state = self._migrate_all_backwards(plan, full_plan, fake=fake) self.check_replacements() return state def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): """ Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan. """ migrations_to_run = {m[0] for m in plan} for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from these sets so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if "apps" not in state.__dict__: if self.progress_callback: self.progress_callback("render_start") state.apps # Render all -- performance critical if self.progress_callback: self.progress_callback("render_success") state = self.apply_migration( state, migration, fake=fake, fake_initial=fake_initial ) migrations_to_run.remove(migration) return state def _migrate_all_backwards(self, plan, full_plan, fake): """ Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan. """ migrations_to_run = {m[0] for m in plan} # Holds all migration states prior to the migrations being unapplied states = {} state = self._create_project_state() applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } if self.progress_callback: self.progress_callback("render_start") for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from this set so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. 
break if migration in migrations_to_run: if "apps" not in state.__dict__: state.apps # Render all -- performance critical # The state before this migration states[migration] = state # The old state keeps as-is, we continue with the new state state = migration.mutate_state(state, preserve=True) migrations_to_run.remove(migration) elif migration in applied_migrations: # Only mutate the state if the migration is actually applied # to make sure the resulting state doesn't include changes # from unrelated migrations. migration.mutate_state(state, preserve=False) if self.progress_callback: self.progress_callback("render_success") for migration, _ in plan: self.unapply_migration(states[migration], migration, fake=fake) applied_migrations.remove(migration) # Generate the post migration state by starting from the state before # the last migration is unapplied and mutating it to include all the # remaining applied migrations. last_unapplied_migration = plan[-1][0] state = states[last_unapplied_migration] for index, (migration, _) in enumerate(full_plan): if migration == last_unapplied_migration: for migration, _ in full_plan[index:]: if migration in applied_migrations: migration.mutate_state(state, preserve=False) break return state def apply_migration(self, state, migration, fake=False, fake_initial=False): """Run a migration forwards.""" migration_recorded = False if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor( atomic=migration.atomic ) as schema_editor: state = migration.apply(state, schema_editor) if not schema_editor.deferred_sql: self.record_migration(migration) migration_recorded = True if not migration_recorded: self.record_migration(migration) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def record_migration(self, migration): # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) def unapply_migration(self, state, migration, fake=False): """Run a migration backwards.""" if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor( atomic=migration.atomic ) as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, also record individual statuses. if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained. 
""" applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Test whether a migration has been implicitly applied - that the tables or columns it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel and AddField). """ def should_skip_detecting_model(migration, model): """ No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database. """ return ( model._meta.proxy or not model._meta.managed or not router.allow_migrate( self.connection.alias, migration.app_label, model_name=model._meta.model_name, ) ) if migration.initial is None: # Bail if the migration isn't the first one in its app if any(app == migration.app_label for app, name in migration.dependencies): return False, project_state elif migration.initial is False: # Bail if it's NOT an initial migration return False, project_state if project_state is None: after_state = self.loader.project_state( (migration.app_label, migration.name), at_end=True ) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_model_migration = False found_add_field_migration = False fold_identifier_case = self.connection.features.ignores_table_name_case with self.connection.cursor() as cursor: existing_table_names = set( self.connection.introspection.table_names(cursor) ) if fold_identifier_case: existing_table_names = { name.casefold() for name in existing_table_names } # Make sure all create model and add field operations are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue db_table = model._meta.db_table if fold_identifier_case: db_table = db_table.casefold() if db_table not in existing_table_names: return False, project_state found_create_model_migration = True elif isinstance(operation, migrations.AddField): model = apps.get_model(migration.app_label, operation.model_name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue table = model._meta.db_table field = model._meta.get_field(operation.name) # Handle implicit many-to-many tables created by AddField. 
if field.many_to_many: through_db_table = field.remote_field.through._meta.db_table if fold_identifier_case: through_db_table = through_db_table.casefold() if through_db_table not in existing_table_names: return False, project_state else: found_add_field_migration = True continue with self.connection.cursor() as cursor: columns = self.connection.introspection.get_table_description( cursor, table ) for column in columns: field_column = field.column column_name = column.name if fold_identifier_case: column_name = column_name.casefold() field_column = field_column.casefold() if column_name == field_column: found_add_field_migration = True break else: return False, project_state # If we get this far and we found at least one CreateModel or AddField # migration, the migration is considered implicitly applied. return (found_create_model_migration or found_add_field_migration), after_state
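

# --- Illustrative usage sketch (not part of Django's source). Assumes a
# configured project where django.setup() has already run; the helper name
# below is hypothetical. ---
def _example_migrate_to_latest():
    from django.db import connections
    from django.db.migrations.executor import MigrationExecutor

    executor = MigrationExecutor(connections["default"])
    # Leaf nodes are the newest migration of every app; a plan is a list of
    # (migration, backwards) 2-tuples, as consumed by migrate() above.
    targets = executor.loader.graph.leaf_nodes()
    plan = executor.migration_plan(targets)
    return executor.migrate(targets, plan=plan)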
castiel248/Convert
Lib/site-packages/django/db/migrations/executor.py
Python
mit
18,923
from functools import total_ordering from django.db.migrations.state import ProjectState from .exceptions import CircularDependencyError, NodeNotFoundError @total_ordering class Node: """ A single node in the migration graph. Contains direct links to adjacent nodes in either direction. """ def __init__(self, key): self.key = key self.children = set() self.parents = set() def __eq__(self, other): return self.key == other def __lt__(self, other): return self.key < other def __hash__(self): return hash(self.key) def __getitem__(self, item): return self.key[item] def __str__(self): return str(self.key) def __repr__(self): return "<%s: (%r, %r)>" % (self.__class__.__name__, self.key[0], self.key[1]) def add_child(self, child): self.children.add(child) def add_parent(self, parent): self.parents.add(parent) class DummyNode(Node): """ A node that doesn't correspond to a migration file on disk. (A squashed migration that was removed, for example.) After the migration graph is processed, all dummy nodes should be removed. If there are any left, a nonexistent dependency error is raised. """ def __init__(self, key, origin, error_message): super().__init__(key) self.origin = origin self.error_message = error_message def raise_error(self): raise NodeNotFoundError(self.error_message, self.key, origin=self.origin) class MigrationGraph: """ Represent the digraph of all migrations in a project. Each migration is a node, and each dependency is an edge. There are no implicit dependencies between numbered migrations - the numbering is merely a convention to aid file listing. Every new numbered migration has a declared dependency to the previous number, meaning that VCS branch merges can be detected and resolved. Migrations files can be marked as replacing another set of migrations - this is to support the "squash" feature. The graph handler isn't responsible for these; instead, the code to load them in here should examine the migration files and if the replaced migrations are all either unapplied or not present, it should ignore the replaced ones, load in just the replacing migration, and repoint any dependencies that pointed to the replaced migrations to point to the replacing one. A node should be a tuple: (app_path, migration_name). The tree special-cases things within an app - namely, root nodes and leaf nodes ignore dependencies to other apps. """ def __init__(self): self.node_map = {} self.nodes = {} def add_node(self, key, migration): assert key not in self.node_map node = Node(key) self.node_map[key] = node self.nodes[key] = migration def add_dummy_node(self, key, origin, error_message): node = DummyNode(key, origin, error_message) self.node_map[key] = node self.nodes[key] = None def add_dependency(self, migration, child, parent, skip_validation=False): """ This may create dummy nodes if they don't yet exist. If `skip_validation=True`, validate_consistency() should be called afterward. 
""" if child not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " child node %r" % (migration, child) ) self.add_dummy_node(child, migration, error_message) if parent not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " parent node %r" % (migration, parent) ) self.add_dummy_node(parent, migration, error_message) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) if not skip_validation: self.validate_consistency() def remove_replaced_nodes(self, replacement, replaced): """ Remove each of the `replaced` nodes (when they exist). Any dependencies that were referencing them are changed to reference the `replacement` node instead. """ # Cast list of replaced keys to set to speed up lookup later. replaced = set(replaced) try: replacement_node = self.node_map[replacement] except KeyError as err: raise NodeNotFoundError( "Unable to find replacement node %r. It was either never added" " to the migration graph, or has been removed." % (replacement,), replacement, ) from err for replaced_key in replaced: self.nodes.pop(replaced_key, None) replaced_node = self.node_map.pop(replaced_key, None) if replaced_node: for child in replaced_node.children: child.parents.remove(replaced_node) # We don't want to create dependencies between the replaced # node and the replacement node as this would lead to # self-referencing on the replacement node at a later iteration. if child.key not in replaced: replacement_node.add_child(child) child.add_parent(replacement_node) for parent in replaced_node.parents: parent.children.remove(replaced_node) # Again, to avoid self-referencing. if parent.key not in replaced: replacement_node.add_parent(parent) parent.add_child(replacement_node) def remove_replacement_node(self, replacement, replaced): """ The inverse operation to `remove_replaced_nodes`. Almost. Remove the replacement node `replacement` and remap its child nodes to `replaced` - the list of nodes it would have replaced. Don't remap its parent nodes as they are expected to be correct already. """ self.nodes.pop(replacement, None) try: replacement_node = self.node_map.pop(replacement) except KeyError as err: raise NodeNotFoundError( "Unable to remove replacement node %r. It was either never added" " to the migration graph, or has been removed already." % (replacement,), replacement, ) from err replaced_nodes = set() replaced_nodes_parents = set() for key in replaced: replaced_node = self.node_map.get(key) if replaced_node: replaced_nodes.add(replaced_node) replaced_nodes_parents |= replaced_node.parents # We're only interested in the latest replaced node, so filter out # replaced nodes that are parents of other replaced nodes. replaced_nodes -= replaced_nodes_parents for child in replacement_node.children: child.parents.remove(replacement_node) for replaced_node in replaced_nodes: replaced_node.add_child(child) child.add_parent(replaced_node) for parent in replacement_node.parents: parent.children.remove(replacement_node) # NOTE: There is no need to remap parent dependencies as we can # assume the replaced nodes already have the correct ancestry. def validate_consistency(self): """Ensure there are no dummy nodes remaining in the graph.""" [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)] def forwards_plan(self, target): """ Given a node, return a list of which previous nodes (dependencies) must be applied, ending with the node itself. 
This is the list you would follow if applying the migrations to a database. """ if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target,), target) return self.iterative_dfs(self.node_map[target]) def backwards_plan(self, target): """ Given a node, return a list of which dependent nodes (dependencies) must be unapplied, ending with the node itself. This is the list you would follow if removing the migrations from a database. """ if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target,), target) return self.iterative_dfs(self.node_map[target], forwards=False) def iterative_dfs(self, start, forwards=True): """Iterative depth-first search for finding dependencies.""" visited = [] visited_set = set() stack = [(start, False)] while stack: node, processed = stack.pop() if node in visited_set: pass elif processed: visited_set.add(node) visited.append(node.key) else: stack.append((node, True)) stack += [ (n, False) for n in sorted(node.parents if forwards else node.children) ] return visited def root_nodes(self, app=None): """ Return all root nodes - that is, nodes with no dependencies inside their app. These are the starting point for an app. """ roots = set() for node in self.nodes: if all(key[0] != node[0] for key in self.node_map[node].parents) and ( not app or app == node[0] ): roots.add(node) return sorted(roots) def leaf_nodes(self, app=None): """ Return all leaf nodes - that is, nodes with no dependents in their app. These are the "most current" version of an app's schema. Having more than one per app is technically an error, but one that gets handled further up, in the interactive command - it's usually the result of a VCS merge and needs some user input. """ leaves = set() for node in self.nodes: if all(key[0] != node[0] for key in self.node_map[node].children) and ( not app or app == node[0] ): leaves.add(node) return sorted(leaves) def ensure_not_cyclic(self): # Algo from GvR: # https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html todo = set(self.nodes) while todo: node = todo.pop() stack = [node] while stack: top = stack[-1] for child in self.node_map[top].children: # Use child.key instead of child to speed up the frequent # hashing. node = child.key if node in stack: cycle = stack[stack.index(node) :] raise CircularDependencyError( ", ".join("%s.%s" % n for n in cycle) ) if node in todo: stack.append(node) todo.remove(node) break else: node = stack.pop() def __str__(self): return "Graph: %s nodes, %s edges" % self._nodes_and_edges() def __repr__(self): nodes, edges = self._nodes_and_edges() return "<%s: nodes=%s, edges=%s>" % (self.__class__.__name__, nodes, edges) def _nodes_and_edges(self): return len(self.nodes), sum( len(node.parents) for node in self.node_map.values() ) def _generate_plan(self, nodes, at_end): plan = [] for node in nodes: for migration in self.forwards_plan(node): if migration not in plan and (at_end or migration not in nodes): plan.append(migration) return plan def make_state(self, nodes=None, at_end=True, real_apps=None): """ Given a migration node or nodes, return a complete ProjectState for it. If at_end is False, return the state before the migration has run. If nodes is not provided, return the overall most current project state. 
""" if nodes is None: nodes = list(self.leaf_nodes()) if not nodes: return ProjectState() if not isinstance(nodes[0], tuple): nodes = [nodes] plan = self._generate_plan(nodes, at_end) project_state = ProjectState(real_apps=real_apps) for node in plan: project_state = self.nodes[node].mutate_state(project_state, preserve=False) return project_state def __contains__(self, node): return node in self.nodes
castiel248/Convert
Lib/site-packages/django/db/migrations/graph.py
Python
mit
13,055
import pkgutil import sys from importlib import import_module, reload from django.apps import apps from django.conf import settings from django.db.migrations.graph import MigrationGraph from django.db.migrations.recorder import MigrationRecorder from .exceptions import ( AmbiguityError, BadMigrationError, InconsistentMigrationHistory, NodeNotFoundError, ) MIGRATIONS_MODULE_NAME = "migrations" class MigrationLoader: """ Load migration files from disk and their status from the database. Migration files are expected to live in the "migrations" directory of an app. Their names are entirely unimportant from a code perspective, but will probably follow the 1234_name.py convention. On initialization, this class will scan those directories, and open and read the Python files, looking for a class called Migration, which should inherit from django.db.migrations.Migration. See django.db.migrations.migration for what that looks like. Some migrations will be marked as "replacing" another set of migrations. These are loaded into a separate set of migrations away from the main ones. If all the migrations they replace are either unapplied or missing from disk, then they are injected into the main set, replacing the named migrations. Any dependency pointers to the replaced migrations are re-pointed to the new migration. This does mean that this class MUST also talk to the database as well as to disk, but this is probably fine. We're already not just operating in memory. """ def __init__( self, connection, load=True, ignore_no_migrations=False, replace_migrations=True, ): self.connection = connection self.disk_migrations = None self.applied_migrations = None self.ignore_no_migrations = ignore_no_migrations self.replace_migrations = replace_migrations if load: self.build_graph() @classmethod def migrations_module(cls, app_label): """ Return the path to the migrations module for the specified app_label and a boolean indicating if the module is specified in settings.MIGRATION_MODULE. """ if app_label in settings.MIGRATION_MODULES: return settings.MIGRATION_MODULES[app_label], True else: app_package_name = apps.get_app_config(app_label).name return "%s.%s" % (app_package_name, MIGRATIONS_MODULE_NAME), False def load_disk(self): """Load the migrations from all INSTALLED_APPS from disk.""" self.disk_migrations = {} self.unmigrated_apps = set() self.migrated_apps = set() for app_config in apps.get_app_configs(): # Get the migrations module directory module_name, explicit = self.migrations_module(app_config.label) if module_name is None: self.unmigrated_apps.add(app_config.label) continue was_loaded = module_name in sys.modules try: module = import_module(module_name) except ModuleNotFoundError as e: if (explicit and self.ignore_no_migrations) or ( not explicit and MIGRATIONS_MODULE_NAME in e.name.split(".") ): self.unmigrated_apps.add(app_config.label) continue raise else: # Module is not a package (e.g. migrations.py). if not hasattr(module, "__path__"): self.unmigrated_apps.add(app_config.label) continue # Empty directories are namespaces. Namespace packages have no # __file__ and don't use a list for __path__. 
See # https://docs.python.org/3/reference/import.html#namespace-packages if getattr(module, "__file__", None) is None and not isinstance( module.__path__, list ): self.unmigrated_apps.add(app_config.label) continue # Force a reload if it's already loaded (tests need this) if was_loaded: reload(module) self.migrated_apps.add(app_config.label) migration_names = { name for _, name, is_pkg in pkgutil.iter_modules(module.__path__) if not is_pkg and name[0] not in "_~" } # Load migrations for migration_name in migration_names: migration_path = "%s.%s" % (module_name, migration_name) try: migration_module = import_module(migration_path) except ImportError as e: if "bad magic number" in str(e): raise ImportError( "Couldn't import %r as it appears to be a stale " ".pyc file." % migration_path ) from e else: raise if not hasattr(migration_module, "Migration"): raise BadMigrationError( "Migration %s in app %s has no Migration class" % (migration_name, app_config.label) ) self.disk_migrations[ app_config.label, migration_name ] = migration_module.Migration( migration_name, app_config.label, ) def get_migration(self, app_label, name_prefix): """Return the named migration or raise NodeNotFoundError.""" return self.graph.nodes[app_label, name_prefix] def get_migration_by_prefix(self, app_label, name_prefix): """ Return the migration(s) which match the given app label and name_prefix. """ # Do the search results = [] for migration_app_label, migration_name in self.disk_migrations: if migration_app_label == app_label and migration_name.startswith( name_prefix ): results.append((migration_app_label, migration_name)) if len(results) > 1: raise AmbiguityError( "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix) ) elif not results: raise KeyError( f"There is no migration for '{app_label}' with the prefix " f"'{name_prefix}'" ) else: return self.disk_migrations[results[0]] def check_key(self, key, current_app): if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph: return key # Special-case __first__, which means "the first migration" for # migrated apps, and is ignored for unmigrated apps. It allows # makemigrations to declare dependencies on apps before they even have # migrations. if key[0] == current_app: # Ignore __first__ references to the same app (#22325) return if key[0] in self.unmigrated_apps: # This app isn't migrated, but something depends on it. # The models will get auto-added into the state, though # so we're fine. return if key[0] in self.migrated_apps: try: if key[1] == "__first__": return self.graph.root_nodes(key[0])[0] else: # "__latest__" return self.graph.leaf_nodes(key[0])[0] except IndexError: if self.ignore_no_migrations: return None else: raise ValueError( "Dependency on app with no migrations: %s" % key[0] ) raise ValueError("Dependency on unknown app: %s" % key[0]) def add_internal_dependencies(self, key, migration): """ Internal dependencies need to be added first to ensure `__first__` dependencies find the correct root node. """ for parent in migration.dependencies: # Ignore __first__ references to the same app. 
if parent[0] == key[0] and parent[1] != "__first__": self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. if self.replace_migrations: for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement # targets. applied_statuses = [ (target in self.applied_migrations) for target in migration.replaces ] # The replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of # its replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is # partially applied. Remove it from the graph and remap # dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any( candidate in self.graph.nodes for candidate in candidates ) if not is_replaced: tries = ", ".join("%s.%s" % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node, ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all( m in applied for m in self.replacements[parent].replaces ): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return { app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps } def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state( nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps ) def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor( collect_sql=True, atomic=migration.atomic ) as schema_editor: if state is None: state = self.project_state( (migration.app_label, migration.name), at_end=False ) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements
castiel248/Convert
Lib/site-packages/django/db/migrations/loader.py
Python
mit
16,863
import re from django.db.migrations.utils import get_migration_name_timestamp from django.db.transaction import atomic from .exceptions import IrreversibleError class Migration: """ The base class for all migrations. Migration files will import this from django.db.migrations.Migration and subclass it as a class called Migration. It will have one or more of the following attributes: - operations: A list of Operation instances, probably from django.db.migrations.operations - dependencies: A list of tuples of (app_path, migration_name) - run_before: A list of tuples of (app_path, migration_name) - replaces: A list of migration_names Note that all migrations come out of migrations and into the Loader or Graph as instances, having been initialized with their app label and name. """ # Operations to apply during this migration, in order. operations = [] # Other migrations that should be run before this migration. # Should be a list of (app, migration_name). dependencies = [] # Other migrations that should be run after this one (i.e. have # this migration added to their dependencies). Useful to make third-party # apps' migrations run after your AUTH_USER replacement, for example. run_before = [] # Migration names in this app that this migration replaces. If this is # non-empty, this migration will only be applied if all these migrations # are not applied. replaces = [] # Is this an initial migration? Initial migrations are skipped on # --fake-initial if the table or fields already exist. If None, check if # the migration has any dependencies to determine if there are dependencies # to tell if db introspection needs to be done. If True, always perform # introspection. If False, never perform introspection. initial = None # Whether to wrap the whole migration in a transaction. Only has an effect # on database backends which support transactional DDL. atomic = True def __init__(self, name, app_label): self.name = name self.app_label = app_label # Copy dependencies & other attrs as we might mutate them at runtime self.operations = list(self.__class__.operations) self.dependencies = list(self.__class__.dependencies) self.run_before = list(self.__class__.run_before) self.replaces = list(self.__class__.replaces) def __eq__(self, other): return ( isinstance(other, Migration) and self.name == other.name and self.app_label == other.app_label ) def __repr__(self): return "<Migration %s.%s>" % (self.app_label, self.name) def __str__(self): return "%s.%s" % (self.app_label, self.name) def __hash__(self): return hash("%s.%s" % (self.app_label, self.name)) def mutate_state(self, project_state, preserve=True): """ Take a ProjectState and return a new one with the migration's operations applied to it. Preserve the original object state by default and return a mutated state from a copy. """ new_state = project_state if preserve: new_state = project_state.clone() for operation in self.operations: operation.state_forwards(self.app_label, new_state) return new_state def apply(self, project_state, schema_editor, collect_sql=False): """ Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a forwards order. Return the resulting project state for efficient reuse by following Migrations. 
""" for operation in self.operations: # If this operation cannot be represented as SQL, place a comment # there instead if collect_sql: schema_editor.collected_sql.append("--") schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- THIS OPERATION CANNOT BE WRITTEN AS SQL" ) continue collected_sql_before = len(schema_editor.collected_sql) # Save the state before the operation has run old_state = project_state.clone() operation.state_forwards(self.app_label, project_state) # Run the operation atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) else: # Normal behaviour operation.database_forwards( self.app_label, schema_editor, old_state, project_state ) if collect_sql and collected_sql_before == len(schema_editor.collected_sql): schema_editor.collected_sql.append("-- (no-op)") return project_state def unapply(self, project_state, schema_editor, collect_sql=False): """ Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a reverse order. The backwards migration process consists of two phases: 1. The intermediate states from right before the first until right after the last operation inside this migration are preserved. 2. The operations are applied in reverse order using the states recorded in step 1. """ # Construct all the intermediate states we need for a reverse migration to_run = [] new_state = project_state # Phase 1 for operation in self.operations: # If it's irreversible, error out if not operation.reversible: raise IrreversibleError( "Operation %s in %s is not reversible" % (operation, self) ) # Preserve new state from previous run to not tamper the same state # over all operations new_state = new_state.clone() old_state = new_state.clone() operation.state_forwards(self.app_label, new_state) to_run.insert(0, (operation, old_state, new_state)) # Phase 2 for operation, to_state, from_state in to_run: if collect_sql: schema_editor.collected_sql.append("--") schema_editor.collected_sql.append("-- %s" % operation.describe()) schema_editor.collected_sql.append("--") if not operation.reduces_to_sql: schema_editor.collected_sql.append( "-- THIS OPERATION CANNOT BE WRITTEN AS SQL" ) continue collected_sql_before = len(schema_editor.collected_sql) atomic_operation = operation.atomic or ( self.atomic and operation.atomic is not False ) if not schema_editor.atomic_migration and atomic_operation: # Force a transaction on a non-transactional-DDL backend or an # atomic operation inside a non-atomic migration. with atomic(schema_editor.connection.alias): operation.database_backwards( self.app_label, schema_editor, from_state, to_state ) else: # Normal behaviour operation.database_backwards( self.app_label, schema_editor, from_state, to_state ) if collect_sql and collected_sql_before == len(schema_editor.collected_sql): schema_editor.collected_sql.append("-- (no-op)") return project_state def suggest_name(self): """ Suggest a name for the operations this migration might represent. 
Names are not guaranteed to be unique, but put some effort into the fallback name to avoid VCS conflicts if possible. """ if self.initial: return "initial" raw_fragments = [op.migration_name_fragment for op in self.operations] fragments = [re.sub(r"\W+", "_", name) for name in raw_fragments if name] if not fragments or len(fragments) != len(self.operations): return "auto_%s" % get_migration_name_timestamp() name = fragments[0] for fragment in fragments[1:]: new_name = f"{name}_{fragment}" if len(new_name) > 52: name = f"{name}_and_more" break name = new_name return name class SwappableTuple(tuple): """ Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file. """ def __new__(cls, value, setting): self = tuple.__new__(cls, value) self.setting = setting return self def swappable_dependency(value): """Turn a setting value into a dependency.""" return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
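

# --- Illustrative sketch (not part of Django's source; the app, model, and
# field names are hypothetical). Shows how suggest_name() derives a name from
# the operations' migration_name_fragment values. ---
def _example_suggest_name():
    from django.db import migrations, models

    class _ExampleMigration(Migration):
        operations = [
            migrations.AddField("author", "age", models.IntegerField(null=True)),
        ]

    # A single fragment ("author_age") becomes the suggested name.
    return _ExampleMigration("temp", "books").suggest_name()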
castiel248/Convert
Lib/site-packages/django/db/migrations/migration.py
Python
mit
9,767
from .fields import AddField, AlterField, RemoveField, RenameField from .models import ( AddConstraint, AddIndex, AlterIndexTogether, AlterModelManagers, AlterModelOptions, AlterModelTable, AlterModelTableComment, AlterOrderWithRespectTo, AlterUniqueTogether, CreateModel, DeleteModel, RemoveConstraint, RemoveIndex, RenameIndex, RenameModel, ) from .special import RunPython, RunSQL, SeparateDatabaseAndState __all__ = [ "CreateModel", "DeleteModel", "AlterModelTable", "AlterModelTableComment", "AlterUniqueTogether", "RenameModel", "AlterIndexTogether", "AlterModelOptions", "AddIndex", "RemoveIndex", "RenameIndex", "AddField", "RemoveField", "AlterField", "RenameField", "AddConstraint", "RemoveConstraint", "SeparateDatabaseAndState", "RunSQL", "RunPython", "AlterOrderWithRespectTo", "AlterModelManagers", ]
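
# Note (not part of Django's source): migration files normally reach these
# classes through the top-level package, e.g.
#
#     from django.db import migrations
#     migrations.AddField(...)   # same class as the AddField re-exported here
#
# since django.db.migrations re-exports everything listed in __all__ above.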
castiel248/Convert
Lib/site-packages/django/db/migrations/operations/__init__.py
Python
mit
964
from django.db import router class Operation: """ Base class for migration operations. It's responsible for both mutating the in-memory model state (see db/migrations/state.py) to represent what it performs, as well as actually performing it against a live database. Note that some operations won't modify memory state at all (e.g. data copying operations), and some will need their modifications to be optionally specified by the user (e.g. custom Python code snippets) Due to the way this class deals with deconstruction, it should be considered immutable. """ # If this migration can be run in reverse. # Some operations are impossible to reverse, like deleting data. reversible = True # Can this migration be represented as SQL? (things like RunPython cannot) reduces_to_sql = True # Should this operation be forced as atomic even on backends with no # DDL transaction support (i.e., does it have no DDL, like RunPython) atomic = False # Should this operation be considered safe to elide and optimize across? elidable = False serialization_expand_args = [] def __new__(cls, *args, **kwargs): # We capture the arguments to make returning them trivial self = object.__new__(cls) self._constructor_args = (args, kwargs) return self def deconstruct(self): """ Return a 3-tuple of class import path (or just name if it lives under django.db.migrations), positional arguments, and keyword arguments. """ return ( self.__class__.__name__, self._constructor_args[0], self._constructor_args[1], ) def state_forwards(self, app_label, state): """ Take the state from the previous migration, and mutate it so that it matches what this migration would perform. """ raise NotImplementedError( "subclasses of Operation must provide a state_forwards() method" ) def database_forwards(self, app_label, schema_editor, from_state, to_state): """ Perform the mutation on the database schema in the normal (forwards) direction. """ raise NotImplementedError( "subclasses of Operation must provide a database_forwards() method" ) def database_backwards(self, app_label, schema_editor, from_state, to_state): """ Perform the mutation on the database schema in the reverse direction - e.g. if this were CreateModel, it would in fact drop the model's table. """ raise NotImplementedError( "subclasses of Operation must provide a database_backwards() method" ) def describe(self): """ Output a brief summary of what the action does. """ return "%s: %s" % (self.__class__.__name__, self._constructor_args) @property def migration_name_fragment(self): """ A filename part suitable for automatically naming a migration containing this operation, or None if not applicable. """ return None def references_model(self, name, app_label): """ Return True if there is a chance this operation references the given model name (as a string), with an app label for accuracy. Used for optimization. If in doubt, return True; returning a false positive will merely make the optimizer a little less efficient, while returning a false negative may result in an unusable optimized migration. """ return True def references_field(self, model_name, name, app_label): """ Return True if there is a chance this operation references the given field name, with an app label for accuracy. Used for optimization. If in doubt, return True. """ return self.references_model(model_name, app_label) def allow_migrate_model(self, connection_alias, model): """ Return whether or not a model may be migrated. 
This is a thin wrapper around router.allow_migrate_model() that preemptively rejects any proxy, swapped out, or unmanaged model. """ if not model._meta.can_migrate(connection_alias): return False return router.allow_migrate_model(connection_alias, model) def reduce(self, operation, app_label): """ Return either a list of operations the actual operation should be replaced with or a boolean that indicates whether or not the specified operation can be optimized across. """ if self.elidable: return [operation] elif operation.elidable: return [self] return False def __repr__(self): return "<%s %s%s>" % ( self.__class__.__name__, ", ".join(map(repr, self._constructor_args[0])), ",".join(" %s=%r" % x for x in self._constructor_args[1].items()), )
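

# --- Illustrative sketch (not part of Django's source): a minimal custom
# operation built on the base class above, in the style of the documented
# "writing your own migration operations" pattern. The class name and SQL
# are hypothetical; a real project would ship this in its own module. ---
class _CreateExtension(Operation):
    """Enable a PostgreSQL extension; reversible and expressible as SQL."""

    reversible = True

    def __init__(self, name):
        self.extension_name = name

    def state_forwards(self, app_label, state):
        # Extensions don't affect the in-memory model state.
        pass

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        schema_editor.execute(
            "CREATE EXTENSION IF NOT EXISTS %s"
            % schema_editor.quote_name(self.extension_name)
        )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        schema_editor.execute(
            "DROP EXTENSION IF EXISTS %s"
            % schema_editor.quote_name(self.extension_name)
        )

    def describe(self):
        return "Creates extension %s" % self.extension_name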
castiel248/Convert
Lib/site-packages/django/db/migrations/operations/base.py
Python
mit
5,082
from django.db.migrations.utils import field_references from django.db.models import NOT_PROVIDED from django.utils.functional import cached_property from .base import Operation class FieldOperation(Operation): def __init__(self, model_name, name, field=None): self.model_name = model_name self.name = name self.field = field @cached_property def model_name_lower(self): return self.model_name.lower() @cached_property def name_lower(self): return self.name.lower() def is_same_model_operation(self, operation): return self.model_name_lower == operation.model_name_lower def is_same_field_operation(self, operation): return ( self.is_same_model_operation(operation) and self.name_lower == operation.name_lower ) def references_model(self, name, app_label): name_lower = name.lower() if name_lower == self.model_name_lower: return True if self.field: return bool( field_references( (app_label, self.model_name_lower), self.field, (app_label, name_lower), ) ) return False def references_field(self, model_name, name, app_label): model_name_lower = model_name.lower() # Check if this operation locally references the field. if model_name_lower == self.model_name_lower: if name == self.name: return True elif ( self.field and hasattr(self.field, "from_fields") and name in self.field.from_fields ): return True # Check if this operation remotely references the field. if self.field is None: return False return bool( field_references( (app_label, self.model_name_lower), self.field, (app_label, model_name_lower), name, ) ) def reduce(self, operation, app_label): return super().reduce(operation, app_label) or not operation.references_field( self.model_name, self.name, app_label ) class AddField(FieldOperation): """Add a field to a model.""" def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { "model_name": self.model_name, "name": self.name, "field": self.field, } if self.preserve_default is not True: kwargs["preserve_default"] = self.preserve_default return (self.__class__.__name__, [], kwargs) def state_forwards(self, app_label, state): state.add_field( app_label, self.model_name_lower, self.name, self.field, self.preserve_default, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) field = to_model._meta.get_field(self.name) if not self.preserve_default: field.default = self.field.default schema_editor.add_field( from_model, field, ) if not self.preserve_default: field.default = NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field( from_model, from_model._meta.get_field(self.name) ) def describe(self): return "Add field %s to %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "%s_%s" % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): if isinstance(operation, FieldOperation) and self.is_same_field_operation( operation ): if isinstance(operation, AlterField): return [ AddField( model_name=self.model_name, name=operation.name, field=operation.field, ), ] elif isinstance(operation, RemoveField): 
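                # An AddField immediately cancelled by a RemoveField of the
                # same field is a net no-op; drop both operations.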
return [] elif isinstance(operation, RenameField): return [ AddField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label) class RemoveField(FieldOperation): """Remove a field from a model.""" def deconstruct(self): kwargs = { "model_name": self.model_name, "name": self.name, } return (self.__class__.__name__, [], kwargs) def state_forwards(self, app_label, state): state.remove_field(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field( from_model, from_model._meta.get_field(self.name) ) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.add_field(from_model, to_model._meta.get_field(self.name)) def describe(self): return "Remove field %s from %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "remove_%s_%s" % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): from .models import DeleteModel if ( isinstance(operation, DeleteModel) and operation.name_lower == self.model_name_lower ): return [operation] return super().reduce(operation, app_label) class AlterField(FieldOperation): """ Alter a field's database column (e.g. null, max_length) to the provided new field. """ def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { "model_name": self.model_name, "name": self.name, "field": self.field, } if self.preserve_default is not True: kwargs["preserve_default"] = self.preserve_default return (self.__class__.__name__, [], kwargs) def state_forwards(self, app_label, state): state.alter_field( app_label, self.model_name_lower, self.name, self.field, self.preserve_default, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) from_field = from_model._meta.get_field(self.name) to_field = to_model._meta.get_field(self.name) if not self.preserve_default: to_field.default = self.field.default schema_editor.alter_field(from_model, from_field, to_field) if not self.preserve_default: to_field.default = NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Alter field %s on %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "alter_%s_%s" % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): if isinstance(operation, RemoveField) and self.is_same_field_operation( operation ): return [operation] elif ( isinstance(operation, RenameField) and self.is_same_field_operation(operation) and self.field.db_column is None ): return [ operation, AlterField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label) 
class RenameField(FieldOperation): """Rename a field on the model. Might affect db_column too.""" def __init__(self, model_name, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(model_name, old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "model_name": self.model_name, "old_name": self.old_name, "new_name": self.new_name, } return (self.__class__.__name__, [], kwargs) def state_forwards(self, app_label, state): state.rename_field( app_label, self.model_name_lower, self.old_name, self.new_name ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.old_name), to_model._meta.get_field(self.new_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.new_name), to_model._meta.get_field(self.old_name), ) def describe(self): return "Rename field %s on %s to %s" % ( self.old_name, self.model_name, self.new_name, ) @property def migration_name_fragment(self): return "rename_%s_%s_%s" % ( self.old_name_lower, self.model_name_lower, self.new_name_lower, ) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def reduce(self, operation, app_label): if ( isinstance(operation, RenameField) and self.is_same_model_operation(operation) and self.new_name_lower == operation.old_name_lower ): return [ RenameField( self.model_name, self.old_name, operation.new_name, ), ] # Skip `FieldOperation.reduce` as we want to run `references_field` # against self.old_name and self.new_name. return super(FieldOperation, self).reduce(operation, app_label) or not ( operation.references_field(self.model_name, self.old_name, app_label) or operation.references_field(self.model_name, self.new_name, app_label) )
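

# --- Illustrative sketch (not part of Django's source; the model and field
# names are hypothetical). Shows the reduce() hooks above feeding the
# migration optimizer. ---
def _example_optimizer_reduction():
    from django.db import models
    from django.db.migrations.optimizer import MigrationOptimizer

    operations = [
        AddField("author", "age", models.IntegerField(null=True)),
        RemoveField("author", "age"),
    ]
    # AddField.reduce() returns [] for a RemoveField of the same field, so
    # the pair collapses to no operations at all.
    return MigrationOptimizer().optimize(operations, "books")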
castiel248/Convert
Lib/site-packages/django/db/migrations/operations/fields.py
Python
mit
12,692
from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.migrations.utils import field_references, resolve_relation from django.db.models.options import normalize_together from django.utils.functional import cached_property from .fields import AddField, AlterField, FieldOperation, RemoveField, RenameField def _check_for_duplicates(arg_name, objs): used_vals = set() for val in objs: if val in used_vals: raise ValueError( "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) ) used_vals.add(val) class ModelOperation(Operation): def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def references_model(self, name, app_label): return name.lower() == self.name_lower def reduce(self, operation, app_label): return super().reduce(operation, app_label) or self.can_reduce_through( operation, app_label ) def can_reduce_through(self, operation, app_label): return not operation.references_model(self.name, app_label) class CreateModel(ModelOperation): """Create a model's table.""" serialization_expand_args = ["fields", "options", "managers"] def __init__(self, name, fields, options=None, bases=None, managers=None): self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] super().__init__(name) # Sanity-check that there are no duplicated field names, bases, or # manager names _check_for_duplicates("fields", (name for name, _ in self.fields)) _check_for_duplicates( "bases", ( base._meta.label_lower if hasattr(base, "_meta") else base.lower() if isinstance(base, str) else base for base in self.bases ), ) _check_for_duplicates("managers", (name for name, _ in self.managers)) def deconstruct(self): kwargs = { "name": self.name, "fields": self.fields, } if self.options: kwargs["options"] = self.options if self.bases and self.bases != (models.Model,): kwargs["bases"] = self.bases if self.managers and self.managers != [("objects", models.Manager())]: kwargs["managers"] = self.managers return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.add_model( ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), ) ) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ( "proxy " if self.options.get("proxy", False) else "", self.name, ) @property def migration_name_fragment(self): return self.name_lower def references_model(self, name, app_label): name_lower = name.lower() if name_lower == self.name_lower: return True # Check we didn't inherit from the model reference_model_tuple = (app_label, name_lower) for base in self.bases: if ( base is not models.Model and isinstance(base, (models.base.ModelBase, str)) and resolve_relation(base, app_label) == reference_model_tuple ): return True # Check we have no FKs/M2Ms with it for _name, field in self.fields: if field_references( (app_label, self.name_lower), field, reference_model_tuple ): 
return True return False def reduce(self, operation, app_label): if ( isinstance(operation, DeleteModel) and self.name_lower == operation.name_lower and not self.options.get("proxy", False) ): return [] elif ( isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower ): return [ CreateModel( operation.new_name, fields=self.fields, options=self.options, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterModelOptions) and self.name_lower == operation.name_lower ): options = {**self.options, **operation.options} for key in operation.ALTER_OPTION_KEYS: if key not in operation.options: options.pop(key, None) return [ CreateModel( self.name, fields=self.fields, options=options, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterModelManagers) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options=self.options, bases=self.bases, managers=operation.managers, ), ] elif ( isinstance(operation, AlterTogetherOptionOperation) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options={ **self.options, **{operation.option_name: operation.option_value}, }, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, AlterOrderWithRespectTo) and self.name_lower == operation.name_lower ): return [ CreateModel( self.name, fields=self.fields, options={ **self.options, "order_with_respect_to": operation.order_with_respect_to, }, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower ): if isinstance(operation, AddField): return [ CreateModel( self.name, fields=self.fields + [(operation.name, operation.field)], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterField): return [ CreateModel( self.name, fields=[ (n, operation.field if n == operation.name else v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveField): options = self.options.copy() for option_name in ("unique_together", "index_together"): option = options.pop(option_name, None) if option: option = set( filter( bool, ( tuple( f for f in fields if f != operation.name_lower ) for fields in option ), ) ) if option: options[option_name] = option order_with_respect_to = options.get("order_with_respect_to") if order_with_respect_to == operation.name_lower: del options["order_with_respect_to"] return [ CreateModel( self.name, fields=[ (n, v) for n, v in self.fields if n.lower() != operation.name_lower ], options=options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameField): options = self.options.copy() for option_name in ("unique_together", "index_together"): option = options.get(option_name) if option: options[option_name] = { tuple( operation.new_name if f == operation.old_name else f for f in fields ) for fields in option } order_with_respect_to = options.get("order_with_respect_to") if order_with_respect_to == operation.old_name: options["order_with_respect_to"] = operation.new_name return [ CreateModel( self.name, fields=[ (operation.new_name if n == operation.old_name else n, v) for n, v in self.fields ], options=options, bases=self.bases, managers=self.managers, ), ] elif ( isinstance(operation, IndexOperation) and self.name_lower == operation.model_name_lower ): if isinstance(operation, AddIndex): 
return [ CreateModel( self.name, fields=self.fields, options={ **self.options, "indexes": [ *self.options.get("indexes", []), operation.index, ], }, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveIndex): options_indexes = [ index for index in self.options.get("indexes", []) if index.name != operation.name ] return [ CreateModel( self.name, fields=self.fields, options={ **self.options, "indexes": options_indexes, }, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameIndex) and operation.old_fields: options_index_together = { fields for fields in self.options.get("index_together", []) if fields != operation.old_fields } if options_index_together: self.options["index_together"] = options_index_together else: self.options.pop("index_together", None) return [ CreateModel( self.name, fields=self.fields, options={ **self.options, "indexes": [ *self.options.get("indexes", []), models.Index( fields=operation.old_fields, name=operation.new_name ), ], }, bases=self.bases, managers=self.managers, ), ] return super().reduce(operation, app_label) class DeleteModel(ModelOperation): """Drop a model's table.""" def deconstruct(self): kwargs = { "name": self.name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label): # The deleted model could be referencing the specified model through # related fields. 
return True def describe(self): return "Delete model %s" % self.name @property def migration_name_fragment(self): return "delete_%s" % self.name_lower class RenameModel(ModelOperation): """Rename a model.""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "old_name": self.old_name, "new_name": self.new_name, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.rename_model(app_label, self.old_name, self.new_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model(*related_key)._meta.get_field( related_object.field.name ) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip( old_model._meta.local_many_to_many, new_model._meta.local_many_to_many ) for old_field, new_field in fields: # Skip self-referential fields as these are renamed above. if ( new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created ): continue # Rename columns and the M2M table. schema_editor._alter_many_to_many( new_model, old_field, new_field, strict=False, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) @property def migration_name_fragment(self): return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) def reduce(self, operation, app_label): if ( isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower ): return [ RenameModel( self.old_name, operation.new_name, ), ] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. 
return super(ModelOperation, self).reduce( operation, app_label ) or not operation.references_model(self.new_name, app_label) class ModelOptionOperation(ModelOperation): def reduce(self, operation, app_label): if ( isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower ): return [operation] return super().reduce(operation, app_label) class AlterModelTable(ModelOptionOperation): """Rename a model's table.""" def __init__(self, name, table): self.table = table super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "table": self.table, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options(app_label, self.name_lower, {"db_table": self.table}) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for old_field, new_field in zip( old_model._meta.local_many_to_many, new_model._meta.local_many_to_many ): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Rename table for %s to %s" % ( self.name, self.table if self.table is not None else "(default)", ) @property def migration_name_fragment(self): return "alter_%s_table" % self.name_lower class AlterModelTableComment(ModelOptionOperation): def __init__(self, name, table_comment): self.table_comment = table_comment super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "table_comment": self.table_comment, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {"db_table_comment": self.table_comment} ) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table_comment( new_model, old_model._meta.db_table_comment, new_model._meta.db_table_comment, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return f"Alter {self.name} table comment" @property def migration_name_fragment(self): return f"alter_{self.name_lower}_table_comment" class AlterTogetherOptionOperation(ModelOptionOperation): option_name = None def __init__(self, name, option_value): if option_value: option_value = set(normalize_together(option_value)) setattr(self, self.option_name, option_value) super().__init__(name) @cached_property def option_value(self): return getattr(self, self.option_name) def deconstruct(self): kwargs = { "name": self.name, self.option_name: self.option_value, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): 
state.alter_model_options( app_label, self.name_lower, {self.option_name: self.option_value}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) alter_together = getattr(schema_editor, "alter_%s" % self.option_name) alter_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( not self.option_value or any((name in fields) for fields in self.option_value) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % ( self.option_name, self.name, len(self.option_value or ""), ) @property def migration_name_fragment(self): return "alter_%s_%s" % (self.name_lower, self.option_name) def can_reduce_through(self, operation, app_label): return super().can_reduce_through(operation, app_label) or ( isinstance(operation, AlterTogetherOptionOperation) and type(operation) is not type(self) ) class AlterUniqueTogether(AlterTogetherOptionOperation): """ Change the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = "unique_together" def __init__(self, name, unique_together): super().__init__(name, unique_together) class AlterIndexTogether(AlterTogetherOptionOperation): """ Change the value of index_together to the target one. Input value of index_together must be a set of tuples. 
""" option_name = "index_together" def __init__(self, name, index_together): super().__init__(name, index_together) class AlterOrderWithRespectTo(ModelOptionOperation): """Represent a change with the order_with_respect_to option.""" option_name = "order_with_respect_to" def __init__(self, name, order_with_respect_to): self.order_with_respect_to = order_with_respect_to super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "order_with_respect_to": self.order_with_respect_to, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {self.option_name: self.order_with_respect_to}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if ( from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to ): schema_editor.remove_field( from_model, from_model._meta.get_field("_order") ) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif ( to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to ): field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) def describe(self): return "Set order_with_respect_to on %s to %s" % ( self.name, self.order_with_respect_to, ) @property def migration_name_fragment(self): return "alter_%s_order_with_respect_to" % self.name_lower class AlterModelOptions(ModelOptionOperation): """ Set new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. 
""" # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "base_manager_name", "default_manager_name", "default_related_name", "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.options = options super().__init__(name) def deconstruct(self): kwargs = { "name": self.name, "options": self.options, } return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, self.options, self.ALTER_OPTION_KEYS, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change Meta options on %s" % self.name @property def migration_name_fragment(self): return "alter_%s_options" % self.name_lower class AlterModelManagers(ModelOptionOperation): """Alter the model's managers.""" serialization_expand_args = ["managers"] def __init__(self, name, managers): self.managers = managers super().__init__(name) def deconstruct(self): return (self.__class__.__qualname__, [self.name, self.managers], {}) def state_forwards(self, app_label, state): state.alter_model_managers(app_label, self.name_lower, self.managers) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change managers on %s" % self.name @property def migration_name_fragment(self): return "alter_%s_managers" % self.name_lower class IndexOperation(Operation): option_name = "indexes" @cached_property def model_name_lower(self): return self.model_name.lower() class AddIndex(IndexOperation): """Add an index on a model.""" def __init__(self, model_name, index): self.model_name = model_name if not index.name: raise ValueError( "Indexes passed to AddIndex operations require a name " "argument. %r doesn't have one." 
% index ) self.index = index def state_forwards(self, app_label, state): state.add_index(app_label, self.model_name_lower, self.index) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index) def deconstruct(self): kwargs = { "model_name": self.model_name, "index": self.index, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): if self.index.expressions: return "Create index %s on %s on model %s" % ( self.index.name, ", ".join([str(expression) for expression in self.index.expressions]), self.model_name, ) return "Create index %s on field(s) %s of model %s" % ( self.index.name, ", ".join(self.index.fields), self.model_name, ) @property def migration_name_fragment(self): return "%s_%s" % (self.model_name_lower, self.index.name.lower()) class RemoveIndex(IndexOperation): """Remove an index from a model.""" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_index(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index) def deconstruct(self): kwargs = { "model_name": self.model_name, "name": self.name, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): return "Remove index %s from %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "remove_%s_%s" % (self.model_name_lower, self.name.lower()) class RenameIndex(IndexOperation): """Rename an index.""" def __init__(self, model_name, new_name, old_name=None, old_fields=None): if not old_name and not old_fields: raise ValueError( "RenameIndex requires one of old_name and old_fields arguments to be " "set." ) if old_name and old_fields: raise ValueError( "RenameIndex.old_name and old_fields are mutually exclusive." 
) self.model_name = model_name self.new_name = new_name self.old_name = old_name self.old_fields = old_fields @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { "model_name": self.model_name, "new_name": self.new_name, } if self.old_name: kwargs["old_name"] = self.old_name if self.old_fields: kwargs["old_fields"] = self.old_fields return (self.__class__.__qualname__, [], kwargs) def state_forwards(self, app_label, state): if self.old_fields: state.add_index( app_label, self.model_name_lower, models.Index(fields=self.old_fields, name=self.new_name), ) state.remove_model_options( app_label, self.model_name_lower, AlterIndexTogether.option_name, self.old_fields, ) else: state.rename_index( app_label, self.model_name_lower, self.old_name, self.new_name ) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if not self.allow_migrate_model(schema_editor.connection.alias, model): return if self.old_fields: from_model = from_state.apps.get_model(app_label, self.model_name) columns = [ from_model._meta.get_field(field).column for field in self.old_fields ] matching_index_name = schema_editor._constraint_names( from_model, column_names=columns, index=True ) if len(matching_index_name) != 1: raise ValueError( "Found wrong number (%s) of indexes for %s(%s)." % ( len(matching_index_name), from_model._meta.db_table, ", ".join(columns), ) ) old_index = models.Index( fields=self.old_fields, name=matching_index_name[0], ) else: from_model_state = from_state.models[app_label, self.model_name_lower] old_index = from_model_state.get_index_by_name(self.old_name) # Don't alter when the index name is not changed. if old_index.name == self.new_name: return to_model_state = to_state.models[app_label, self.model_name_lower] new_index = to_model_state.get_index_by_name(self.new_name) schema_editor.rename_index(model, old_index, new_index) def database_backwards(self, app_label, schema_editor, from_state, to_state): if self.old_fields: # Backward operation with unnamed index is a no-op. 
return self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = ( self.old_name_lower, self.new_name_lower, ) self.new_name, self.old_name = self.old_name, self.new_name def describe(self): if self.old_name: return ( f"Rename index {self.old_name} on {self.model_name} to {self.new_name}" ) return ( f"Rename unnamed index for {self.old_fields} on {self.model_name} to " f"{self.new_name}" ) @property def migration_name_fragment(self): if self.old_name: return "rename_%s_%s" % (self.old_name_lower, self.new_name_lower) return "rename_%s_%s_%s" % ( self.model_name_lower, "_".join(self.old_fields), self.new_name_lower, ) def reduce(self, operation, app_label): if ( isinstance(operation, RenameIndex) and self.model_name_lower == operation.model_name_lower and operation.old_name and self.new_name_lower == operation.old_name_lower ): return [ RenameIndex( self.model_name, new_name=operation.new_name, old_name=self.old_name, old_fields=self.old_fields, ) ] return super().reduce(operation, app_label) class AddConstraint(IndexOperation): option_name = "constraints" def __init__(self, model_name, constraint): self.model_name = model_name self.constraint = constraint def state_forwards(self, app_label, state): state.add_constraint(app_label, self.model_name_lower, self.constraint) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_constraint(model, self.constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_constraint(model, self.constraint) def deconstruct(self): return ( self.__class__.__name__, [], { "model_name": self.model_name, "constraint": self.constraint, }, ) def describe(self): return "Create constraint %s on model %s" % ( self.constraint.name, self.model_name, ) @property def migration_name_fragment(self): return "%s_%s" % (self.model_name_lower, self.constraint.name.lower()) class RemoveConstraint(IndexOperation): option_name = "constraints" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_constraint(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] constraint = from_model_state.get_constraint_by_name(self.name) schema_editor.remove_constraint(model, constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] constraint = to_model_state.get_constraint_by_name(self.name) schema_editor.add_constraint(model, constraint) def deconstruct(self): return ( self.__class__.__name__, [], { "model_name": self.model_name, "name": self.name, }, ) def describe(self): return 
"Remove constraint %s from model %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
castiel248/Convert
Lib/site-packages/django/db/migrations/operations/models.py
Python
mit
42988
from django.db import router

from .base import Operation


class SeparateDatabaseAndState(Operation):
    """
    Take two lists of operations - ones that will be used for the database,
    and ones that will be used for the state change. This allows operations
    that don't support state change to have it applied separately, operations
    that affect only the database or only the state, and so on.
    """

    serialization_expand_args = ["database_operations", "state_operations"]

    def __init__(self, database_operations=None, state_operations=None):
        self.database_operations = database_operations or []
        self.state_operations = state_operations or []

    def deconstruct(self):
        kwargs = {}
        if self.database_operations:
            kwargs["database_operations"] = self.database_operations
        if self.state_operations:
            kwargs["state_operations"] = self.state_operations
        return (self.__class__.__qualname__, [], kwargs)

    def state_forwards(self, app_label, state):
        for state_operation in self.state_operations:
            state_operation.state_forwards(app_label, state)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Calculate state separately here, since this operation's own state
        # functions don't describe the individual database operations.
        for database_operation in self.database_operations:
            to_state = from_state.clone()
            database_operation.state_forwards(app_label, to_state)
            database_operation.database_forwards(
                app_label, schema_editor, from_state, to_state
            )
            from_state = to_state

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Calculate state separately here, since this operation's own state
        # functions don't describe the individual database operations.
        to_states = {}
        for dbop in self.database_operations:
            to_states[dbop] = to_state
            to_state = to_state.clone()
            dbop.state_forwards(app_label, to_state)
        # to_state now has the states of all the database_operations applied,
        # which is the from_state for the backwards migration of the last
        # operation.
        for database_operation in reversed(self.database_operations):
            from_state = to_state
            to_state = to_states[database_operation]
            database_operation.database_backwards(
                app_label, schema_editor, from_state, to_state
            )

    def describe(self):
        return "Custom state/database change combination"


class RunSQL(Operation):
    """
    Run some raw SQL. A reverse SQL statement may be provided.

    Also accept a list of operations that represent the state change effected
    by this SQL change, in case it's custom column/table creation/deletion.
""" noop = "" def __init__( self, sql, reverse_sql=None, state_operations=None, hints=None, elidable=False ): self.sql = sql self.reverse_sql = reverse_sql self.state_operations = state_operations or [] self.hints = hints or {} self.elidable = elidable def deconstruct(self): kwargs = { "sql": self.sql, } if self.reverse_sql is not None: kwargs["reverse_sql"] = self.reverse_sql if self.state_operations: kwargs["state_operations"] = self.state_operations if self.hints: kwargs["hints"] = self.hints return (self.__class__.__qualname__, [], kwargs) @property def reversible(self): return self.reverse_sql is not None def state_forwards(self, app_label, state): for state_operation in self.state_operations: state_operation.state_forwards(app_label, state) def database_forwards(self, app_label, schema_editor, from_state, to_state): if router.allow_migrate( schema_editor.connection.alias, app_label, **self.hints ): self._run_sql(schema_editor, self.sql) def database_backwards(self, app_label, schema_editor, from_state, to_state): if self.reverse_sql is None: raise NotImplementedError("You cannot reverse this operation") if router.allow_migrate( schema_editor.connection.alias, app_label, **self.hints ): self._run_sql(schema_editor, self.reverse_sql) def describe(self): return "Raw SQL operation" def _run_sql(self, schema_editor, sqls): if isinstance(sqls, (list, tuple)): for sql in sqls: params = None if isinstance(sql, (list, tuple)): elements = len(sql) if elements == 2: sql, params = sql else: raise ValueError("Expected a 2-tuple but got %d" % elements) schema_editor.execute(sql, params=params) elif sqls != RunSQL.noop: statements = schema_editor.connection.ops.prepare_sql_script(sqls) for statement in statements: schema_editor.execute(statement, params=None) class RunPython(Operation): """ Run Python code in a context suitable for doing versioned ORM operations. """ reduces_to_sql = False def __init__( self, code, reverse_code=None, atomic=None, hints=None, elidable=False ): self.atomic = atomic # Forwards code if not callable(code): raise ValueError("RunPython must be supplied with a callable") self.code = code # Reverse code if reverse_code is None: self.reverse_code = None else: if not callable(reverse_code): raise ValueError("RunPython must be supplied with callable arguments") self.reverse_code = reverse_code self.hints = hints or {} self.elidable = elidable def deconstruct(self): kwargs = { "code": self.code, } if self.reverse_code is not None: kwargs["reverse_code"] = self.reverse_code if self.atomic is not None: kwargs["atomic"] = self.atomic if self.hints: kwargs["hints"] = self.hints return (self.__class__.__qualname__, [], kwargs) @property def reversible(self): return self.reverse_code is not None def state_forwards(self, app_label, state): # RunPython objects have no state effect. To add some, combine this # with SeparateDatabaseAndState. pass def database_forwards(self, app_label, schema_editor, from_state, to_state): # RunPython has access to all models. Ensure that all models are # reloaded in case any are delayed. from_state.clear_delayed_apps_cache() if router.allow_migrate( schema_editor.connection.alias, app_label, **self.hints ): # We now execute the Python code in a context that contains a 'models' # object, representing the versioned models as an app registry. # We could try to override the global cache, but then people will still # use direct imports, so we go with a documentation approach instead. 
self.code(from_state.apps, schema_editor) def database_backwards(self, app_label, schema_editor, from_state, to_state): if self.reverse_code is None: raise NotImplementedError("You cannot reverse this operation") if router.allow_migrate( schema_editor.connection.alias, app_label, **self.hints ): self.reverse_code(from_state.apps, schema_editor) def describe(self): return "Raw Python operation" @staticmethod def noop(apps, schema_editor): return None
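
# --- Illustrative sketch (editor's addition, not Django source): a typical
# migration wiring RunPython with RunPython.noop as the reverse callable so the
# migration stays reversible. The "myapp"/"Author" names and the backfill logic
# are hypothetical.
from django.db import migrations


def backfill_names(apps, schema_editor):
    # Use the historical model from the versioned registry, not a direct
    # import, so the available fields match this point in the migration graph.
    Author = apps.get_model("myapp", "Author")
    Author.objects.filter(name="").update(name="unknown")


class Migration(migrations.Migration):
    dependencies = [("myapp", "0001_initial")]
    operations = [
        migrations.RunPython(backfill_names, migrations.RunPython.noop),
    ]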
castiel248/Convert
Lib/site-packages/django/db/migrations/operations/special.py
Python
mit
7831
class MigrationOptimizer:
    """
    Power the optimization process, where you provide a list of Operations
    and you are returned a list of equal or shorter length - operations
    are merged into one if possible.

    For example, a CreateModel and an AddField can be optimized into a
    new CreateModel, and CreateModel and DeleteModel can be optimized into
    nothing.
    """

    def optimize(self, operations, app_label):
        """
        Main optimization entry point. Pass in a list of Operation instances,
        get out a new list of Operation instances.

        Unfortunately, due to the scope of the optimization (two combinable
        operations might be separated by several hundred others), this can't be
        done as a peephole optimization with checks/output implemented on
        the Operations themselves; instead, the optimizer looks at each
        individual operation and scans forwards in the list to see if there
        are any matches, stopping at boundaries - operations which can't
        be optimized over (RunSQL, operations on the same field/model, etc.)

        The inner loop is run until the starting list is the same as the result
        list, and then the result is returned. This means that operation
        optimization must be stable and always return an equal or shorter list.
        """
        if app_label is None:
            raise TypeError("app_label must be a str.")
        # Internal tracking variable for test assertions about # of loops
        self._iterations = 0
        while True:
            result = self.optimize_inner(operations, app_label)
            self._iterations += 1
            if result == operations:
                return result
            operations = result

    def optimize_inner(self, operations, app_label):
        """Inner optimization loop."""
        new_operations = []
        for i, operation in enumerate(operations):
            right = True  # Should we reduce on the right or on the left.
            # Compare it to each operation after it
            for j, other in enumerate(operations[i + 1 :]):
                result = operation.reduce(other, app_label)
                if isinstance(result, list):
                    in_between = operations[i + 1 : i + j + 1]
                    if right:
                        new_operations.extend(in_between)
                        new_operations.extend(result)
                    elif all(op.reduce(other, app_label) is True for op in in_between):
                        # Perform a left reduction if all of the in-between
                        # operations can optimize through other.
                        new_operations.extend(result)
                        new_operations.extend(in_between)
                    else:
                        # Otherwise keep trying.
                        new_operations.append(operation)
                        break
                    new_operations.extend(operations[i + j + 2 :])
                    return new_operations
                elif not result:
                    # Can't perform a right reduction.
                    right = False
            else:
                new_operations.append(operation)
        return new_operations
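
# --- Illustrative sketch (editor's addition, not Django source): the optimizer
# drives the reduce() hooks shown earlier, so a CreateModel followed by a
# DeleteModel of the same model cancels to nothing. "myapp" is a hypothetical
# app label.
from django.db import models
from django.db.migrations.operations import CreateModel, DeleteModel

ops = [
    CreateModel("Author", fields=[("id", models.AutoField(primary_key=True))]),
    DeleteModel("Author"),
]
assert MigrationOptimizer().optimize(ops, app_label="myapp") == []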
castiel248/Convert
Lib/site-packages/django/db/migrations/optimizer.py
Python
mit
3255
import datetime import importlib import os import sys from django.apps import apps from django.core.management.base import OutputWrapper from django.db.models import NOT_PROVIDED from django.utils import timezone from django.utils.version import get_docs_version from .loader import MigrationLoader class MigrationQuestioner: """ Give the autodetector responses to questions it might have. This base class has a built-in noninteractive mode, but the interactive subclass is what the command-line arguments will use. """ def __init__(self, defaults=None, specified_apps=None, dry_run=None): self.defaults = defaults or {} self.specified_apps = specified_apps or set() self.dry_run = dry_run def ask_initial(self, app_label): """Should we create an initial migration for the app?""" # If it was specified on the command line, definitely true if app_label in self.specified_apps: return True # Otherwise, we look to see if it has a migrations module # without any Python files in it, apart from __init__.py. # Apps from the new app template will have these; the Python # file check will ensure we skip South ones. try: app_config = apps.get_app_config(app_label) except LookupError: # It's a fake app. return self.defaults.get("ask_initial", False) migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label) if migrations_import_path is None: # It's an application with migrations disabled. return self.defaults.get("ask_initial", False) try: migrations_module = importlib.import_module(migrations_import_path) except ImportError: return self.defaults.get("ask_initial", False) else: if getattr(migrations_module, "__file__", None): filenames = os.listdir(os.path.dirname(migrations_module.__file__)) elif hasattr(migrations_module, "__path__"): if len(migrations_module.__path__) > 1: return False filenames = os.listdir(list(migrations_module.__path__)[0]) return not any(x.endswith(".py") for x in filenames if x != "__init__.py") def ask_not_null_addition(self, field_name, model_name): """Adding a NOT NULL field to a model.""" # None means quit return None def ask_not_null_alteration(self, field_name, model_name): """Changing a NULL field to NOT NULL.""" # None means quit return None def ask_rename(self, model_name, old_name, new_name, field_instance): """Was this field really renamed?""" return self.defaults.get("ask_rename", False) def ask_rename_model(self, old_model_state, new_model_state): """Was this model really renamed?""" return self.defaults.get("ask_rename_model", False) def ask_merge(self, app_label): """Should these migrations really be merged?""" return self.defaults.get("ask_merge", False) def ask_auto_now_add_addition(self, field_name, model_name): """Adding an auto_now_add field to a model.""" # None means quit return None def ask_unique_callable_default_addition(self, field_name, model_name): """Adding a unique field with a callable default.""" # None means continue. 
return None class InteractiveMigrationQuestioner(MigrationQuestioner): def __init__( self, defaults=None, specified_apps=None, dry_run=None, prompt_output=None ): super().__init__( defaults=defaults, specified_apps=specified_apps, dry_run=dry_run ) self.prompt_output = prompt_output or OutputWrapper(sys.stdout) def _boolean_input(self, question, default=None): self.prompt_output.write(f"{question} ", ending="") result = input() if not result and default is not None: return default while not result or result[0].lower() not in "yn": self.prompt_output.write("Please answer yes or no: ", ending="") result = input() return result[0].lower() == "y" def _choice_input(self, question, choices): self.prompt_output.write(f"{question}") for i, choice in enumerate(choices): self.prompt_output.write(" %s) %s" % (i + 1, choice)) self.prompt_output.write("Select an option: ", ending="") result = input() while True: try: value = int(result) except ValueError: pass else: if 0 < value <= len(choices): return value self.prompt_output.write("Please select a valid option: ", ending="") result = input() def _ask_default(self, default=""): """ Prompt for a default value. The ``default`` argument allows providing a custom default value (as a string) which will be shown to the user and used as the return value if the user doesn't provide any other input. """ self.prompt_output.write("Please enter the default value as valid Python.") if default: self.prompt_output.write( f"Accept the default '{default}' by pressing 'Enter' or " f"provide another value." ) self.prompt_output.write( "The datetime and django.utils.timezone modules are available, so " "it is possible to provide e.g. timezone.now as a value." ) self.prompt_output.write("Type 'exit' to exit this prompt") while True: if default: prompt = "[default: {}] >>> ".format(default) else: prompt = ">>> " self.prompt_output.write(prompt, ending="") code = input() if not code and default: code = default if not code: self.prompt_output.write( "Please enter some code, or 'exit' (without quotes) to exit." ) elif code == "exit": sys.exit(1) else: try: return eval(code, {}, {"datetime": datetime, "timezone": timezone}) except (SyntaxError, NameError) as e: self.prompt_output.write("Invalid input: %s" % e) def ask_not_null_addition(self, field_name, model_name): """Adding a NOT NULL field to a model.""" if not self.dry_run: choice = self._choice_input( f"It is impossible to add a non-nullable field '{field_name}' " f"to {model_name} without specifying a default. This is " f"because the database needs something to populate existing " f"rows.\n" f"Please select a fix:", [ ( "Provide a one-off default now (will be set on all existing " "rows with a null value for this column)" ), "Quit and manually define a default value in models.py.", ], ) if choice == 2: sys.exit(3) else: return self._ask_default() return None def ask_not_null_alteration(self, field_name, model_name): """Changing a NULL field to NOT NULL.""" if not self.dry_run: choice = self._choice_input( f"It is impossible to change a nullable field '{field_name}' " f"on {model_name} to non-nullable without providing a " f"default. This is because the database needs something to " f"populate existing rows.\n" f"Please select a fix:", [ ( "Provide a one-off default now (will be set on all existing " "rows with a null value for this column)" ), "Ignore for now. 
Existing rows that contain NULL values " "will have to be handled manually, for example with a " "RunPython or RunSQL operation.", "Quit and manually define a default value in models.py.", ], ) if choice == 2: return NOT_PROVIDED elif choice == 3: sys.exit(3) else: return self._ask_default() return None def ask_rename(self, model_name, old_name, new_name, field_instance): """Was this field really renamed?""" msg = "Was %s.%s renamed to %s.%s (a %s)? [y/N]" return self._boolean_input( msg % ( model_name, old_name, model_name, new_name, field_instance.__class__.__name__, ), False, ) def ask_rename_model(self, old_model_state, new_model_state): """Was this model really renamed?""" msg = "Was the model %s.%s renamed to %s? [y/N]" return self._boolean_input( msg % (old_model_state.app_label, old_model_state.name, new_model_state.name), False, ) def ask_merge(self, app_label): return self._boolean_input( "\nMerging will only work if the operations printed above do not conflict\n" + "with each other (working on different fields or models)\n" + "Should these migration branches be merged? [y/N]", False, ) def ask_auto_now_add_addition(self, field_name, model_name): """Adding an auto_now_add field to a model.""" if not self.dry_run: choice = self._choice_input( f"It is impossible to add the field '{field_name}' with " f"'auto_now_add=True' to {model_name} without providing a " f"default. This is because the database needs something to " f"populate existing rows.\n", [ "Provide a one-off default now which will be set on all " "existing rows", "Quit and manually define a default value in models.py.", ], ) if choice == 2: sys.exit(3) else: return self._ask_default(default="timezone.now") return None def ask_unique_callable_default_addition(self, field_name, model_name): """Adding a unique field with a callable default.""" if not self.dry_run: version = get_docs_version() choice = self._choice_input( f"Callable default on unique field {model_name}.{field_name} " f"will not generate unique values upon migrating.\n" f"Please choose how to proceed:\n", [ f"Continue making this migration as the first step in " f"writing a manual migration to generate unique values " f"described here: " f"https://docs.djangoproject.com/en/{version}/howto/" f"writing-migrations/#migrations-that-add-unique-fields.", "Quit and edit field options in models.py.", ], ) if choice == 2: sys.exit(3) return None class NonInteractiveMigrationQuestioner(MigrationQuestioner): def __init__( self, defaults=None, specified_apps=None, dry_run=None, verbosity=1, log=None, ): self.verbosity = verbosity self.log = log super().__init__( defaults=defaults, specified_apps=specified_apps, dry_run=dry_run, ) def log_lack_of_migration(self, field_name, model_name, reason): if self.verbosity > 0: self.log( f"Field '{field_name}' on model '{model_name}' not migrated: " f"{reason}." ) def ask_not_null_addition(self, field_name, model_name): # We can't ask the user, so act like the user aborted. self.log_lack_of_migration( field_name, model_name, "it is impossible to add a non-nullable field without specifying " "a default", ) sys.exit(3) def ask_not_null_alteration(self, field_name, model_name): # We can't ask the user, so set as not provided. self.log( f"Field '{field_name}' on model '{model_name}' given a default of " f"NOT PROVIDED and must be corrected." ) return NOT_PROVIDED def ask_auto_now_add_addition(self, field_name, model_name): # We can't ask the user, so act like the user aborted. 
self.log_lack_of_migration( field_name, model_name, "it is impossible to add a field with 'auto_now_add=True' without " "specifying a default", ) sys.exit(3)
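
# --- Illustrative sketch (editor's addition, not Django source): the base
# questioner answers from its defaults dict, which is how callers pre-seed
# answers without prompting. The model/field names are hypothetical.
questioner = MigrationQuestioner(defaults={"ask_rename": True})
assert questioner.ask_rename("author", "name", "full_name", None) is True
# Unseeded questions fall back to their non-interactive defaults.
assert questioner.ask_rename_model(None, None) is False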
castiel248/Convert
Lib/site-packages/django/db/migrations/questioner.py
Python
mit
13330
from django.apps.registry import Apps from django.db import DatabaseError, models from django.utils.functional import classproperty from django.utils.timezone import now from .exceptions import MigrationSchemaMissing class MigrationRecorder: """ Deal with storing migration records in the database. Because this table is actually itself used for dealing with model creation, it's the one thing we can't do normally via migrations. We manually handle table creation/schema updating (using schema backend) and then have a floating model to do queries with. If a migration is unapplied its row is removed from the table. Having a row in the table always means a migration is applied. """ _migration_class = None @classproperty def Migration(cls): """ Lazy load to avoid AppRegistryNotReady if installed apps import MigrationRecorder. """ if cls._migration_class is None: class Migration(models.Model): app = models.CharField(max_length=255) name = models.CharField(max_length=255) applied = models.DateTimeField(default=now) class Meta: apps = Apps() app_label = "migrations" db_table = "django_migrations" def __str__(self): return "Migration %s for %s" % (self.name, self.app) cls._migration_class = Migration return cls._migration_class def __init__(self, connection): self.connection = connection @property def migration_qs(self): return self.Migration.objects.using(self.connection.alias) def has_table(self): """Return True if the django_migrations table exists.""" with self.connection.cursor() as cursor: tables = self.connection.introspection.table_names(cursor) return self.Migration._meta.db_table in tables def ensure_schema(self): """Ensure the table exists and has the correct schema.""" # If the table's there, that's fine - we've never changed its schema # in the codebase. if self.has_table(): return # Make the table try: with self.connection.schema_editor() as editor: editor.create_model(self.Migration) except DatabaseError as exc: raise MigrationSchemaMissing( "Unable to create the django_migrations table (%s)" % exc ) def applied_migrations(self): """ Return a dict mapping (app_name, migration_name) to Migration instances for all applied migrations. """ if self.has_table(): return { (migration.app, migration.name): migration for migration in self.migration_qs } else: # If the django_migrations table doesn't exist, then no migrations # are applied. return {} def record_applied(self, app, name): """Record that a migration was applied.""" self.ensure_schema() self.migration_qs.create(app=app, name=name) def record_unapplied(self, app, name): """Record that a migration was unapplied.""" self.ensure_schema() self.migration_qs.filter(app=app, name=name).delete() def flush(self): """Delete all migration records. Useful for testing migrations.""" self.migration_qs.all().delete()
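
# --- Illustrative sketch (editor's addition, not Django source): recording and
# querying applied migrations on the default connection. This assumes a fully
# configured Django project; the app and migration names are hypothetical.
from django.db import connection

recorder = MigrationRecorder(connection)
recorder.ensure_schema()  # create django_migrations if it doesn't exist yet
recorder.record_applied("myapp", "0001_initial")
applied = recorder.applied_migrations()  # {(app, name): Migration instance}
assert ("myapp", "0001_initial") in applied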
castiel248/Convert
Lib/site-packages/django/db/migrations/recorder.py
Python
mit
3535
import builtins import collections.abc import datetime import decimal import enum import functools import math import os import pathlib import re import types import uuid from django.conf import SettingsReference from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject from django.utils.functional import LazyObject, Promise from django.utils.version import PY311, get_docs_version class BaseSerializer: def __init__(self, value): self.value = value def serialize(self): raise NotImplementedError( "Subclasses of BaseSerializer must implement the serialize() method." ) class BaseSequenceSerializer(BaseSerializer): def _format(self): raise NotImplementedError( "Subclasses of BaseSequenceSerializer must implement the _format() method." ) def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) value = self._format() return value % (", ".join(strings)), imports class BaseSimpleSerializer(BaseSerializer): def serialize(self): return repr(self.value), set() class ChoicesSerializer(BaseSerializer): def serialize(self): return serializer_factory(self.value.value).serialize() class DateTimeSerializer(BaseSerializer): """For datetime.*, except datetime.datetime.""" def serialize(self): return repr(self.value), {"import datetime"} class DatetimeDatetimeSerializer(BaseSerializer): """For datetime.datetime.""" def serialize(self): if self.value.tzinfo is not None and self.value.tzinfo != datetime.timezone.utc: self.value = self.value.astimezone(datetime.timezone.utc) imports = ["import datetime"] return repr(self.value), set(imports) class DecimalSerializer(BaseSerializer): def serialize(self): return repr(self.value), {"from decimal import Decimal"} class DeconstructableSerializer(BaseSerializer): @staticmethod def serialize_deconstructed(path, args, kwargs): name, imports = DeconstructableSerializer._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = serializer_factory(arg).serialize() strings.append(arg_string) imports.update(arg_imports) for kw, arg in sorted(kwargs.items()): arg_string, arg_imports = serializer_factory(arg).serialize() imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @staticmethod def _serialize_path(path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports def serialize(self): return self.serialize_deconstructed(*self.value.deconstruct()) class DictionarySerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for k, v in sorted(self.value.items()): k_string, k_imports = serializer_factory(k).serialize() v_string, v_imports = serializer_factory(v).serialize() imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports class EnumSerializer(BaseSerializer): def serialize(self): enum_class = self.value.__class__ module = enum_class.__module__ if issubclass(enum_class, enum.Flag): if PY311: members = list(self.value) else: members, _ = enum._decompose(enum_class, self.value) members = reversed(members) else: members = (self.value,) return ( " | ".join( [ 
f"{module}.{enum_class.__qualname__}[{item.name!r}]" for item in members ] ), {"import %s" % module}, ) class FloatSerializer(BaseSimpleSerializer): def serialize(self): if math.isnan(self.value) or math.isinf(self.value): return 'float("{}")'.format(self.value), set() return super().serialize() class FrozensetSerializer(BaseSequenceSerializer): def _format(self): return "frozenset([%s])" class FunctionTypeSerializer(BaseSerializer): def serialize(self): if getattr(self.value, "__self__", None) and isinstance( self.value.__self__, type ): klass = self.value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), { "import %s" % module } # Further error checking if self.value.__name__ == "<lambda>": raise ValueError("Cannot serialize function: lambda") if self.value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % self.value) module_name = self.value.__module__ if "<" not in self.value.__qualname__: # Qualname can include <locals> return "%s.%s" % (module_name, self.value.__qualname__), { "import %s" % self.value.__module__ } raise ValueError( "Could not find function %s in %s.\n" % (self.value.__name__, module_name) ) class FunctoolsPartialSerializer(BaseSerializer): def serialize(self): # Serialize functools.partial() arguments func_string, func_imports = serializer_factory(self.value.func).serialize() args_string, args_imports = serializer_factory(self.value.args).serialize() keywords_string, keywords_imports = serializer_factory( self.value.keywords ).serialize() # Add any imports needed by arguments imports = {"import functools", *func_imports, *args_imports, *keywords_imports} return ( "functools.%s(%s, *%s, **%s)" % ( self.value.__class__.__name__, func_string, args_string, keywords_string, ), imports, ) class IterableSerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. value = "(%s)" if len(strings) != 1 else "(%s,)" return value % (", ".join(strings)), imports class ModelFieldSerializer(DeconstructableSerializer): def serialize(self): attr_name, path, args, kwargs = self.value.deconstruct() return self.serialize_deconstructed(path, args, kwargs) class ModelManagerSerializer(DeconstructableSerializer): def serialize(self): as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() if as_manager: name, imports = self._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return self.serialize_deconstructed(manager_path, args, kwargs) class OperationSerializer(BaseSerializer): def serialize(self): from django.db.migrations.writer import OperationWriter string, imports = OperationWriter(self.value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(","), imports class PathLikeSerializer(BaseSerializer): def serialize(self): return repr(os.fspath(self.value)), {} class PathSerializer(BaseSerializer): def serialize(self): # Convert concrete paths to pure paths to avoid issues with migrations # generated on one platform being used on a different platform. 
prefix = "Pure" if isinstance(self.value, pathlib.Path) else "" return "pathlib.%s%r" % (prefix, self.value), {"import pathlib"} class RegexSerializer(BaseSerializer): def serialize(self): regex_pattern, pattern_imports = serializer_factory( self.value.pattern ).serialize() # Turn off default implicit flags (e.g. re.U) because regexes with the # same implicit and explicit flags aren't equal. flags = self.value.flags ^ re.compile("").flags regex_flags, flag_imports = serializer_factory(flags).serialize() imports = {"import re", *pattern_imports, *flag_imports} args = [regex_pattern] if flags: args.append(regex_flags) return "re.compile(%s)" % ", ".join(args), imports class SequenceSerializer(BaseSequenceSerializer): def _format(self): return "[%s]" class SetSerializer(BaseSequenceSerializer): def _format(self): # Serialize as a set literal except when value is empty because {} # is an empty dict. return "{%s}" if self.value else "set(%s)" class SettingsReferenceSerializer(BaseSerializer): def serialize(self): return "settings.%s" % self.value.setting_name, { "from django.conf import settings" } class TupleSerializer(BaseSequenceSerializer): def _format(self): # When len(value)==0, the empty tuple should be serialized as "()", # not "(,)" because (,) is invalid Python syntax. return "(%s)" if len(self.value) != 1 else "(%s,)" class TypeSerializer(BaseSerializer): def serialize(self): special_cases = [ (models.Model, "models.Model", ["from django.db import models"]), (type(None), "type(None)", []), ] for case, string, imports in special_cases: if case is self.value: return string, set(imports) if hasattr(self.value, "__module__"): module = self.value.__module__ if module == builtins.__name__: return self.value.__name__, set() else: return "%s.%s" % (module, self.value.__qualname__), { "import %s" % module } class UUIDSerializer(BaseSerializer): def serialize(self): return "uuid.%s" % repr(self.value), {"import uuid"} class Serializer: _registry = { # Some of these are order-dependent. frozenset: FrozensetSerializer, list: SequenceSerializer, set: SetSerializer, tuple: TupleSerializer, dict: DictionarySerializer, models.Choices: ChoicesSerializer, enum.Enum: EnumSerializer, datetime.datetime: DatetimeDatetimeSerializer, (datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer, SettingsReference: SettingsReferenceSerializer, float: FloatSerializer, (bool, int, type(None), bytes, str, range): BaseSimpleSerializer, decimal.Decimal: DecimalSerializer, (functools.partial, functools.partialmethod): FunctoolsPartialSerializer, ( types.FunctionType, types.BuiltinFunctionType, types.MethodType, ): FunctionTypeSerializer, collections.abc.Iterable: IterableSerializer, (COMPILED_REGEX_TYPE, RegexObject): RegexSerializer, uuid.UUID: UUIDSerializer, pathlib.PurePath: PathSerializer, os.PathLike: PathLikeSerializer, } @classmethod def register(cls, type_, serializer): if not issubclass(serializer, BaseSerializer): raise ValueError( "'%s' must inherit from 'BaseSerializer'." % serializer.__name__ ) cls._registry[type_] = serializer @classmethod def unregister(cls, type_): cls._registry.pop(type_) def serializer_factory(value): if isinstance(value, Promise): value = str(value) elif isinstance(value, LazyObject): # The unwrapped value is returned as the first item of the arguments # tuple. 
value = value.__reduce__()[1][0] if isinstance(value, models.Field): return ModelFieldSerializer(value) if isinstance(value, models.manager.BaseManager): return ModelManagerSerializer(value) if isinstance(value, Operation): return OperationSerializer(value) if isinstance(value, type): return TypeSerializer(value) # Anything that knows how to deconstruct itself. if hasattr(value, "deconstruct"): return DeconstructableSerializer(value) for type_, serializer_cls in Serializer._registry.items(): if isinstance(value, type_): return serializer_cls(value) raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) )
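
# --- Illustrative sketch (editor's addition, not Django source): the factory
# returns a serializer whose serialize() yields the source string plus the
# import set that the migration writer embeds in generated files. It reuses
# this module's own "import datetime".
string, imports = serializer_factory(datetime.date(2024, 1, 1)).serialize()
assert string == "datetime.date(2024, 1, 1)"
assert imports == {"import datetime"}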
castiel248/Convert
Lib/site-packages/django/db/migrations/serializer.py
Python
mit
13560
import copy from collections import defaultdict from contextlib import contextmanager from functools import partial from django.apps import AppConfig from django.apps.registry import Apps from django.apps.registry import apps as global_apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist from django.db import models from django.db.migrations.utils import field_is_referenced, get_references from django.db.models import NOT_PROVIDED from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT from django.db.models.options import DEFAULT_NAMES, normalize_together from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.module_loading import import_string from django.utils.version import get_docs_version from .exceptions import InvalidBasesError from .utils import resolve_relation def _get_app_label_and_model_name(model, app_label=""): if isinstance(model, str): split = model.split(".", 1) return tuple(split) if len(split) == 2 else (app_label, split[0]) else: return model._meta.app_label, model._meta.model_name def _get_related_models(m): """Return all models that have a direct relationship to the given model.""" related_models = [ subclass for subclass in m.__subclasses__() if issubclass(subclass, models.Model) ] related_fields_models = set() for f in m._meta.get_fields(include_parents=True, include_hidden=True): if ( f.is_relation and f.related_model is not None and not isinstance(f.related_model, str) ): related_fields_models.add(f.model) related_models.append(f.related_model) # Reverse accessors of foreign keys to proxy models are attached to their # concrete proxied model. opts = m._meta if opts.proxy and m in related_fields_models: related_models.append(opts.concrete_model) return related_models def get_related_models_tuples(model): """ Return a list of typical (app_label, model_name) tuples for all related models for the given model. """ return { (rel_mod._meta.app_label, rel_mod._meta.model_name) for rel_mod in _get_related_models(model) } def get_related_models_recursive(model): """ Return all models that have a direct or indirect relationship to the given model. Relationships are either defined by explicit relational fields, like ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another model (a superclass is related to its subclasses, but not vice versa). Note, however, that a model inheriting from a concrete model is also related to its superclass through the implicit *_ptr OneToOneField on the subclass. """ seen = set() queue = _get_related_models(model) for rel_mod in queue: rel_app_label, rel_model_name = ( rel_mod._meta.app_label, rel_mod._meta.model_name, ) if (rel_app_label, rel_model_name) in seen: continue seen.add((rel_app_label, rel_model_name)) queue.extend(_get_related_models(rel_mod)) return seen - {(model._meta.app_label, model._meta.model_name)} class ProjectState: """ Represent the entire project's overall state. This is the item that is passed around - do it here rather than at the app level so that cross-app FKs/etc. resolve properly. 
""" def __init__(self, models=None, real_apps=None): self.models = models or {} # Apps to include from main registry, usually unmigrated ones if real_apps is None: real_apps = set() else: assert isinstance(real_apps, set) self.real_apps = real_apps self.is_delayed = False # {remote_model_key: {model_key: {field_name: field}}} self._relations = None @property def relations(self): if self._relations is None: self.resolve_fields_and_relations() return self._relations def add_model(self, model_state): model_key = model_state.app_label, model_state.name_lower self.models[model_key] = model_state if self._relations is not None: self.resolve_model_relations(model_key) if "apps" in self.__dict__: # hasattr would cache the property self.reload_model(*model_key) def remove_model(self, app_label, model_name): model_key = app_label, model_name del self.models[model_key] if self._relations is not None: self._relations.pop(model_key, None) # Call list() since _relations can change size during iteration. for related_model_key, model_relations in list(self._relations.items()): model_relations.pop(model_key, None) if not model_relations: del self._relations[related_model_key] if "apps" in self.__dict__: # hasattr would cache the property self.apps.unregister_model(*model_key) # Need to do this explicitly since unregister_model() doesn't clear # the cache automatically (#24513) self.apps.clear_cache() def rename_model(self, app_label, old_name, new_name): # Add a new model. old_name_lower = old_name.lower() new_name_lower = new_name.lower() renamed_model = self.models[app_label, old_name_lower].clone() renamed_model.name = new_name self.models[app_label, new_name_lower] = renamed_model # Repoint all fields pointing to the old model to the new one. old_model_tuple = (app_label, old_name_lower) new_remote_model = f"{app_label}.{new_name}" to_reload = set() for model_state, name, field, reference in get_references( self, old_model_tuple ): changed_field = None if reference.to: changed_field = field.clone() changed_field.remote_field.model = new_remote_model if reference.through: if changed_field is None: changed_field = field.clone() changed_field.remote_field.through = new_remote_model if changed_field: model_state.fields[name] = changed_field to_reload.add((model_state.app_label, model_state.name_lower)) if self._relations is not None: old_name_key = app_label, old_name_lower new_name_key = app_label, new_name_lower if old_name_key in self._relations: self._relations[new_name_key] = self._relations.pop(old_name_key) for model_relations in self._relations.values(): if old_name_key in model_relations: model_relations[new_name_key] = model_relations.pop(old_name_key) # Reload models related to old model before removing the old model. self.reload_models(to_reload, delay=True) # Remove the old model. 
self.remove_model(app_label, old_name_lower) self.reload_model(app_label, new_name_lower, delay=True) def alter_model_options(self, app_label, model_name, options, option_keys=None): model_state = self.models[app_label, model_name] model_state.options = {**model_state.options, **options} if option_keys: for key in option_keys: if key not in options: model_state.options.pop(key, False) self.reload_model(app_label, model_name, delay=True) def remove_model_options(self, app_label, model_name, option_name, value_to_remove): model_state = self.models[app_label, model_name] if objs := model_state.options.get(option_name): model_state.options[option_name] = [ obj for obj in objs if tuple(obj) != tuple(value_to_remove) ] self.reload_model(app_label, model_name, delay=True) def alter_model_managers(self, app_label, model_name, managers): model_state = self.models[app_label, model_name] model_state.managers = list(managers) self.reload_model(app_label, model_name, delay=True) def _append_option(self, app_label, model_name, option_name, obj): model_state = self.models[app_label, model_name] model_state.options[option_name] = [*model_state.options[option_name], obj] self.reload_model(app_label, model_name, delay=True) def _remove_option(self, app_label, model_name, option_name, obj_name): model_state = self.models[app_label, model_name] objs = model_state.options[option_name] model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name] self.reload_model(app_label, model_name, delay=True) def add_index(self, app_label, model_name, index): self._append_option(app_label, model_name, "indexes", index) def remove_index(self, app_label, model_name, index_name): self._remove_option(app_label, model_name, "indexes", index_name) def rename_index(self, app_label, model_name, old_index_name, new_index_name): model_state = self.models[app_label, model_name] objs = model_state.options["indexes"] new_indexes = [] for obj in objs: if obj.name == old_index_name: obj = obj.clone() obj.name = new_index_name new_indexes.append(obj) model_state.options["indexes"] = new_indexes self.reload_model(app_label, model_name, delay=True) def add_constraint(self, app_label, model_name, constraint): self._append_option(app_label, model_name, "constraints", constraint) def remove_constraint(self, app_label, model_name, constraint_name): self._remove_option(app_label, model_name, "constraints", constraint_name) def add_field(self, app_label, model_name, name, field, preserve_default): # If preserve default is off, don't use the default for future state. if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field model_key = app_label, model_name self.models[model_key].fields[name] = field if self._relations is not None: self.resolve_model_field_relations(model_key, name, field) # Delay rendering of relationships if it's not a relational field. delay = not field.is_relation self.reload_model(*model_key, delay=delay) def remove_field(self, app_label, model_name, name): model_key = app_label, model_name model_state = self.models[model_key] old_field = model_state.fields.pop(name) if self._relations is not None: self.resolve_model_field_relations(model_key, name, old_field) # Delay rendering of relationships if it's not a relational field. 
delay = not old_field.is_relation self.reload_model(*model_key, delay=delay) def alter_field(self, app_label, model_name, name, field, preserve_default): if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field model_key = app_label, model_name fields = self.models[model_key].fields if self._relations is not None: old_field = fields.pop(name) if old_field.is_relation: self.resolve_model_field_relations(model_key, name, old_field) fields[name] = field if field.is_relation: self.resolve_model_field_relations(model_key, name, field) else: fields[name] = field # TODO: investigate if old relational fields must be reloaded or if # it's sufficient if the new field is (#27737). # Delay rendering of relationships if it's not a relational field and # not referenced by a foreign key. delay = not field.is_relation and not field_is_referenced( self, model_key, (name, field) ) self.reload_model(*model_key, delay=delay) def rename_field(self, app_label, model_name, old_name, new_name): model_key = app_label, model_name model_state = self.models[model_key] # Rename the field. fields = model_state.fields try: found = fields.pop(old_name) except KeyError: raise FieldDoesNotExist( f"{app_label}.{model_name} has no field named '{old_name}'" ) fields[new_name] = found for field in fields.values(): # Fix from_fields to refer to the new field. from_fields = getattr(field, "from_fields", None) if from_fields: field.from_fields = tuple( [ new_name if from_field_name == old_name else from_field_name for from_field_name in from_fields ] ) # Fix index/unique_together to refer to the new field. options = model_state.options for option in ("index_together", "unique_together"): if option in options: options[option] = [ [new_name if n == old_name else n for n in together] for together in options[option] ] # Fix to_fields to refer to the new field. delay = True references = get_references(self, model_key, (old_name, found)) for *_, field, reference in references: delay = False if reference.to: remote_field, to_fields = reference.to if getattr(remote_field, "field_name", None) == old_name: remote_field.field_name = new_name if to_fields: field.to_fields = tuple( [ new_name if to_field_name == old_name else to_field_name for to_field_name in to_fields ] ) if self._relations is not None: old_name_lower = old_name.lower() new_name_lower = new_name.lower() for to_model in self._relations.values(): if old_name_lower in to_model[model_key]: field = to_model[model_key].pop(old_name_lower) field.name = new_name_lower to_model[model_key][new_name_lower] = field self.reload_model(*model_key, delay=delay) def _find_reload_model(self, app_label, model_name, delay=False): if delay: self.is_delayed = True related_models = set() try: old_model = self.apps.get_model(app_label, model_name) except LookupError: pass else: # Get all relations to and from the old model before reloading, # as _meta.apps may change if delay: related_models = get_related_models_tuples(old_model) else: related_models = get_related_models_recursive(old_model) # Get all outgoing references from the model to be rendered model_state = self.models[(app_label, model_name)] # Directly related models are the models pointed to by ForeignKeys, # OneToOneFields, and ManyToManyFields. 
direct_related_models = set() for field in model_state.fields.values(): if field.is_relation: if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: continue rel_app_label, rel_model_name = _get_app_label_and_model_name( field.related_model, app_label ) direct_related_models.add((rel_app_label, rel_model_name.lower())) # For all direct related models recursively get all related models. related_models.update(direct_related_models) for rel_app_label, rel_model_name in direct_related_models: try: rel_model = self.apps.get_model(rel_app_label, rel_model_name) except LookupError: pass else: if delay: related_models.update(get_related_models_tuples(rel_model)) else: related_models.update(get_related_models_recursive(rel_model)) # Include the model itself related_models.add((app_label, model_name)) return related_models def reload_model(self, app_label, model_name, delay=False): if "apps" in self.__dict__: # hasattr would cache the property related_models = self._find_reload_model(app_label, model_name, delay) self._reload(related_models) def reload_models(self, models, delay=True): if "apps" in self.__dict__: # hasattr would cache the property related_models = set() for app_label, model_name in models: related_models.update( self._find_reload_model(app_label, model_name, delay) ) self._reload(related_models) def _reload(self, related_models): # Unregister all related models with self.apps.bulk_update(): for rel_app_label, rel_model_name in related_models: self.apps.unregister_model(rel_app_label, rel_model_name) states_to_be_rendered = [] # Gather all models states of those models that will be rerendered. # This includes: # 1. All related models of unmigrated apps for model_state in self.apps.real_models: if (model_state.app_label, model_state.name_lower) in related_models: states_to_be_rendered.append(model_state) # 2. All related models of migrated apps for rel_app_label, rel_model_name in related_models: try: model_state = self.models[rel_app_label, rel_model_name] except KeyError: pass else: states_to_be_rendered.append(model_state) # Render all models self.apps.render_multiple(states_to_be_rendered) def update_model_field_relation( self, model, model_key, field_name, field, concretes, ): remote_model_key = resolve_relation(model, *model_key) if remote_model_key[0] not in self.real_apps and remote_model_key in concretes: remote_model_key = concretes[remote_model_key] relations_to_remote_model = self._relations[remote_model_key] if field_name in self.models[model_key].fields: # The assert holds because it's a new relation, or an altered # relation, in which case references have been removed by # alter_field(). 
assert field_name not in relations_to_remote_model[model_key] relations_to_remote_model[model_key][field_name] = field else: del relations_to_remote_model[model_key][field_name] if not relations_to_remote_model[model_key]: del relations_to_remote_model[model_key] def resolve_model_field_relations( self, model_key, field_name, field, concretes=None, ): remote_field = field.remote_field if not remote_field: return if concretes is None: concretes, _ = self._get_concrete_models_mapping_and_proxy_models() self.update_model_field_relation( remote_field.model, model_key, field_name, field, concretes, ) through = getattr(remote_field, "through", None) if not through: return self.update_model_field_relation( through, model_key, field_name, field, concretes ) def resolve_model_relations(self, model_key, concretes=None): if concretes is None: concretes, _ = self._get_concrete_models_mapping_and_proxy_models() model_state = self.models[model_key] for field_name, field in model_state.fields.items(): self.resolve_model_field_relations(model_key, field_name, field, concretes) def resolve_fields_and_relations(self): # Resolve fields. for model_state in self.models.values(): for field_name, field in model_state.fields.items(): field.name = field_name # Resolve relations. # {remote_model_key: {model_key: {field_name: field}}} self._relations = defaultdict(partial(defaultdict, dict)) concretes, proxies = self._get_concrete_models_mapping_and_proxy_models() for model_key in concretes: self.resolve_model_relations(model_key, concretes) for model_key in proxies: self._relations[model_key] = self._relations[concretes[model_key]] def get_concrete_model_key(self, model): ( concrete_models_mapping, _, ) = self._get_concrete_models_mapping_and_proxy_models() model_key = make_model_tuple(model) return concrete_models_mapping[model_key] def _get_concrete_models_mapping_and_proxy_models(self): concrete_models_mapping = {} proxy_models = {} # Split models to proxy and concrete models. for model_key, model_state in self.models.items(): if model_state.options.get("proxy"): proxy_models[model_key] = model_state # Find a concrete model for the proxy. concrete_models_mapping[ model_key ] = self._find_concrete_model_from_proxy( proxy_models, model_state, ) else: concrete_models_mapping[model_key] = model_key return concrete_models_mapping, proxy_models def _find_concrete_model_from_proxy(self, proxy_models, model_state): for base in model_state.bases: if not (isinstance(base, str) or issubclass(base, models.Model)): continue base_key = make_model_tuple(base) base_state = proxy_models.get(base_key) if not base_state: # Concrete model found, stop looking at bases. 
return base_key return self._find_concrete_model_from_proxy(proxy_models, base_state) def clone(self): """Return an exact copy of this ProjectState.""" new_state = ProjectState( models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps, ) if "apps" in self.__dict__: new_state.apps = self.apps.clone() new_state.is_delayed = self.is_delayed return new_state def clear_delayed_apps_cache(self): if self.is_delayed and "apps" in self.__dict__: del self.__dict__["apps"] @cached_property def apps(self): return StateApps(self.real_apps, self.models) @classmethod def from_apps(cls, apps): """Take an Apps and return a ProjectState matching it.""" app_models = {} for model in apps.get_models(include_swapped=True): model_state = ModelState.from_model(model) app_models[(model_state.app_label, model_state.name_lower)] = model_state return cls(app_models) def __eq__(self, other): return self.models == other.models and self.real_apps == other.real_apps class AppConfigStub(AppConfig): """Stub of an AppConfig. Only provides a label and a dict of models.""" def __init__(self, label): self.apps = None self.models = {} # App-label and app-name are not the same thing, so technically passing # in the label here is wrong. In practice, migrations don't care about # the app name, but we need something unique, and the label works fine. self.label = label self.name = label def import_models(self): self.models = self.apps.all_models[self.label] class StateApps(Apps): """ Subclass of the global Apps registry class to better handle dynamic model additions and removals. """ def __init__(self, real_apps, models, ignore_swappable=False): # Any apps in self.real_apps should have all their models included # in the render. We don't use the original model instances as there # are some variables that refer to the Apps object. # FKs/M2Ms from real apps are also not included as they just # mess things up with partial states (due to lack of dependencies) self.real_models = [] for app_label in real_apps: app = global_apps.get_app_config(app_label) for model in app.get_models(): self.real_models.append(ModelState.from_model(model, exclude_rels=True)) # Populate the app registry with a stub for each application. app_labels = {model_state.app_label for model_state in models.values()} app_configs = [ AppConfigStub(label) for label in sorted([*real_apps, *app_labels]) ] super().__init__(app_configs) # These locks get in the way of copying as implemented in clone(), # which is called whenever Django duplicates a StateApps before # updating it. self._lock = None self.ready_event = None self.render_multiple([*models.values(), *self.real_models]) # There shouldn't be any operations pending at this point. from django.core.checks.model_checks import _check_lazy_references ignore = ( {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() ) errors = _check_lazy_references(self, ignore=ignore) if errors: raise ValueError("\n".join(error.msg for error in errors)) @contextmanager def bulk_update(self): # Avoid clearing each model's cache for each change. Instead, clear # all caches when we're finished updating the model instances. ready = self.ready self.ready = False try: yield finally: self.ready = ready self.clear_cache() def render_multiple(self, model_states): # We keep trying to render the models in a loop, ignoring invalid # base errors, until the size of the unrendered models doesn't # decrease by at least one, meaning there's a base dependency loop/ # missing base. 
if not model_states: return # Prevent that all model caches are expired for each render. with self.bulk_update(): unrendered_models = model_states while unrendered_models: new_unrendered_models = [] for model in unrendered_models: try: model.render(self) except InvalidBasesError: new_unrendered_models.append(model) if len(new_unrendered_models) == len(unrendered_models): raise InvalidBasesError( "Cannot resolve bases for %r\nThis can happen if you are " "inheriting models from an app with migrations (e.g. " "contrib.auth)\n in an app with no migrations; see " "https://docs.djangoproject.com/en/%s/topics/migrations/" "#dependencies for more" % (new_unrendered_models, get_docs_version()) ) unrendered_models = new_unrendered_models def clone(self): """Return a clone of this registry.""" clone = StateApps([], {}) clone.all_models = copy.deepcopy(self.all_models) for app_label in self.app_configs: app_config = AppConfigStub(app_label) app_config.apps = clone app_config.import_models() clone.app_configs[app_label] = app_config # No need to actually clone them, they'll never change clone.real_models = self.real_models return clone def register_model(self, app_label, model): self.all_models[app_label][model._meta.model_name] = model if app_label not in self.app_configs: self.app_configs[app_label] = AppConfigStub(app_label) self.app_configs[app_label].apps = self self.app_configs[app_label].models[model._meta.model_name] = model self.do_pending_operations(model) self.clear_cache() def unregister_model(self, app_label, model_name): try: del self.all_models[app_label][model_name] del self.app_configs[app_label].models[model_name] except KeyError: pass class ModelState: """ Represent a Django Model. Don't use the actual Model class as it's not designed to have its options changed - instead, mutate this one and then render it into a Model as required. Note that while you are allowed to mutate .fields, you are not allowed to mutate the Field instances inside there themselves - you must instead assign new ones, as these are not detached during a clone. """ def __init__( self, app_label, name, fields, options=None, bases=None, managers=None ): self.app_label = app_label self.name = name self.fields = dict(fields) self.options = options or {} self.options.setdefault("indexes", []) self.options.setdefault("constraints", []) self.bases = bases or (models.Model,) self.managers = managers or [] for name, field in self.fields.items(): # Sanity-check that fields are NOT already bound to a model. if hasattr(field, "model"): raise ValueError( 'ModelState.fields cannot be bound to a model - "%s" is.' % name ) # Sanity-check that relation fields are NOT referring to a model class. if field.is_relation and hasattr(field.related_model, "_meta"): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.to" does. ' "Use a string reference instead." % name ) if field.many_to_many and hasattr(field.remote_field.through, "_meta"): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.through" ' "does. Use a string reference instead." % name ) # Sanity-check that indexes have their name set. for index in self.options["indexes"]: if not index.name: raise ValueError( "Indexes passed to ModelState require a name attribute. " "%r doesn't have one." 
% index ) @cached_property def name_lower(self): return self.name.lower() def get_field(self, field_name): if field_name == "_order": field_name = self.options.get("order_with_respect_to", field_name) return self.fields[field_name] @classmethod def from_model(cls, model, exclude_rels=False): """Given a model, return a ModelState representing it.""" # Deconstruct the fields fields = [] for field in model._meta.local_fields: if getattr(field, "remote_field", None) and exclude_rels: continue if isinstance(field, models.OrderWrt): continue name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError( "Couldn't reconstruct field %s on %s: %s" % ( name, model._meta.label, e, ) ) if not exclude_rels: for field in model._meta.local_many_to_many: name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError( "Couldn't reconstruct m2m field %s on %s: %s" % ( name, model._meta.object_name, e, ) ) # Extract the options options = {} for name in DEFAULT_NAMES: # Ignore some special options if name in ["apps", "app_label"]: continue elif name in model._meta.original_attrs: if name == "unique_together": ut = model._meta.original_attrs["unique_together"] options[name] = set(normalize_together(ut)) elif name == "index_together": it = model._meta.original_attrs["index_together"] options[name] = set(normalize_together(it)) elif name == "indexes": indexes = [idx.clone() for idx in model._meta.indexes] for index in indexes: if not index.name: index.set_name_with_model(model) options["indexes"] = indexes elif name == "constraints": options["constraints"] = [ con.clone() for con in model._meta.constraints ] else: options[name] = model._meta.original_attrs[name] # If we're ignoring relationships, remove all field-listing model # options (that option basically just means "make a stub model") if exclude_rels: for key in ["unique_together", "index_together", "order_with_respect_to"]: if key in options: del options[key] # Private fields are ignored, so remove options that refer to them. elif options.get("order_with_respect_to") in { field.name for field in model._meta.private_fields }: del options["order_with_respect_to"] def flatten_bases(model): bases = [] for base in model.__bases__: if hasattr(base, "_meta") and base._meta.abstract: bases.extend(flatten_bases(base)) else: bases.append(base) return bases # We can't rely on __mro__ directly because we only want to flatten # abstract models and not the whole tree. However by recursing on # __bases__ we may end up with duplicates and ordering issues, we # therefore discard any duplicates and reorder the bases according # to their index in the MRO. flattened_bases = sorted( set(flatten_bases(model)), key=lambda x: model.__mro__.index(x) ) # Make our record bases = tuple( (base._meta.label_lower if hasattr(base, "_meta") else base) for base in flattened_bases ) # Ensure at least one base inherits from models.Model if not any( (isinstance(base, str) or issubclass(base, models.Model)) for base in bases ): bases = (models.Model,) managers = [] manager_names = set() default_manager_shim = None for manager in model._meta.managers: if manager.name in manager_names: # Skip overridden managers. continue elif manager.use_in_migrations: # Copy managers usable in migrations. new_manager = copy.copy(manager) new_manager._set_creation_counter() elif manager is model._base_manager or manager is model._default_manager: # Shim custom managers used as default and base managers. 
new_manager = models.Manager() new_manager.model = manager.model new_manager.name = manager.name if manager is model._default_manager: default_manager_shim = new_manager else: continue manager_names.add(manager.name) managers.append((manager.name, new_manager)) # Ignore a shimmed default manager called objects if it's the only one. if managers == [("objects", default_manager_shim)]: managers = [] # Construct the new ModelState return cls( model._meta.app_label, model._meta.object_name, fields, options, bases, managers, ) def construct_managers(self): """Deep-clone the managers using deconstruction.""" # Sort all managers by their creation counter sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) for mgr_name, manager in sorted_managers: as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() if as_manager: qs_class = import_string(qs_path) yield mgr_name, qs_class.as_manager() else: manager_class = import_string(manager_path) yield mgr_name, manager_class(*args, **kwargs) def clone(self): """Return an exact copy of this ModelState.""" return self.__class__( app_label=self.app_label, name=self.name, fields=dict(self.fields), # Since options are shallow-copied here, operations such as # AddIndex must replace their option (e.g 'indexes') rather # than mutating it. options=dict(self.options), bases=self.bases, managers=list(self.managers), ) def render(self, apps): """Create a Model object from our current state into the given apps.""" # First, make a Meta object meta_contents = {"app_label": self.app_label, "apps": apps, **self.options} meta = type("Meta", (), meta_contents) # Then, work out our bases try: bases = tuple( (apps.get_model(base) if isinstance(base, str) else base) for base in self.bases ) except LookupError: raise InvalidBasesError( "Cannot resolve one or more bases from %r" % (self.bases,) ) # Clone fields for the body, add other bits. body = {name: field.clone() for name, field in self.fields.items()} body["Meta"] = meta body["__module__"] = "__fake__" # Restore managers body.update(self.construct_managers()) # Then, make a Model object (apps.register_model is called in __new__) return type(self.name, bases, body) def get_index_by_name(self, name): for index in self.options["indexes"]: if index.name == name: return index raise ValueError("No index named %s on model %s" % (name, self.name)) def get_constraint_by_name(self, name): for constraint in self.options["constraints"]: if constraint.name == name: return constraint raise ValueError("No constraint named %s on model %s" % (name, self.name)) def __repr__(self): return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) def __eq__(self, other): return ( (self.app_label == other.app_label) and (self.name == other.name) and (len(self.fields) == len(other.fields)) and all( k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:] for (k1, f1), (k2, f2) in zip( sorted(self.fields.items()), sorted(other.fields.items()), ) ) and (self.options == other.options) and (self.bases == other.bases) and (self.managers == other.managers) )
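# --- Usage sketch (added for illustration; not part of the file above) ---
# How ProjectState and ModelState are typically combined: build an
# in-memory state, then render it into model classes against the fake
# StateApps registry via .apps. A minimal sketch assuming a configured
# Django project; "myapp" and the Author model are hypothetical.
from django.db import models
from django.db.migrations.state import ModelState, ProjectState

state = ProjectState()
state.add_model(
    ModelState(
        app_label="myapp",
        name="Author",
        fields=[
            ("id", models.AutoField(primary_key=True)),
            ("name", models.CharField(max_length=100)),
        ],
    )
)
Author = state.apps.get_model("myapp", "Author")  # rendered by StateApps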
castiel248/Convert
Lib/site-packages/django/db/migrations/state.py
Python
mit
40,654
import datetime import re from collections import namedtuple from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT FieldReference = namedtuple("FieldReference", "to through") COMPILED_REGEX_TYPE = type(re.compile("")) class RegexObject: def __init__(self, obj): self.pattern = obj.pattern self.flags = obj.flags def __eq__(self, other): if not isinstance(other, RegexObject): return NotImplemented return self.pattern == other.pattern and self.flags == other.flags def get_migration_name_timestamp(): return datetime.datetime.now().strftime("%Y%m%d_%H%M") def resolve_relation(model, app_label=None, model_name=None): """ Turn a model class or model reference string into a model tuple. app_label and model_name are used to resolve the scope of recursive and unscoped model relationships. """ if isinstance(model, str): if model == RECURSIVE_RELATIONSHIP_CONSTANT: if app_label is None or model_name is None: raise TypeError( "app_label and model_name must be provided to resolve " "recursive relationships." ) return app_label, model_name if "." in model: app_label, model_name = model.split(".", 1) return app_label, model_name.lower() if app_label is None: raise TypeError( "app_label must be provided to resolve unscoped model relationships." ) return app_label, model.lower() return model._meta.app_label, model._meta.model_name def field_references( model_tuple, field, reference_model_tuple, reference_field_name=None, reference_field=None, ): """ Return either False or a FieldReference if `field` references the provided context. False positives can be returned if `reference_field_name` is provided without `reference_field` because of the introspection limitation it incurs. This should not be an issue when this function is used to determine whether or not an optimization can take place. """ remote_field = field.remote_field if not remote_field: return False references_to = None references_through = None if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple: to_fields = getattr(field, "to_fields", None) if ( reference_field_name is None or # Unspecified to_field(s). to_fields is None or # Reference to primary key. ( None in to_fields and (reference_field is None or reference_field.primary_key) ) or # Reference to field. reference_field_name in to_fields ): references_to = (remote_field, to_fields) through = getattr(remote_field, "through", None) if through and resolve_relation(through, *model_tuple) == reference_model_tuple: through_fields = remote_field.through_fields if ( reference_field_name is None or # Unspecified through_fields. through_fields is None or # Reference to field. reference_field_name in through_fields ): references_through = (remote_field, through_fields) if not (references_to or references_through): return False return FieldReference(references_to, references_through) def get_references(state, model_tuple, field_tuple=()): """ Generator of (model_state, name, field, reference) referencing the provided context. If field_tuple is provided, only references to this particular field of model_tuple will be generated.
""" for state_model_tuple, model_state in state.models.items(): for name, field in model_state.fields.items(): reference = field_references( state_model_tuple, field, model_tuple, *field_tuple ) if reference: yield model_state, name, field, reference def field_is_referenced(state, model_tuple, field_tuple): """Return whether `field_tuple` is referenced by any state models.""" return next(get_references(state, model_tuple, field_tuple), None) is not None
castiel248/Convert
Lib/site-packages/django/db/migrations/utils.py
Python
mit
4,401
import os import re from importlib import import_module from django import get_version from django.apps import apps # SettingsReference imported for backwards compatibility in Django 2.2. from django.conf import SettingsReference # NOQA from django.db import migrations from django.db.migrations.loader import MigrationLoader from django.db.migrations.serializer import Serializer, serializer_factory from django.utils.inspect import get_func_args from django.utils.module_loading import module_dir from django.utils.timezone import now class OperationWriter: def __init__(self, operation, indentation=2): self.operation = operation self.buff = [] self.indentation = indentation def serialize(self): def _write(_arg_name, _arg_value): if _arg_name in self.operation.serialization_expand_args and isinstance( _arg_value, (list, tuple, dict) ): if isinstance(_arg_value, dict): self.feed("%s={" % _arg_name) self.indent() for key, value in _arg_value.items(): key_string, key_imports = MigrationWriter.serialize(key) arg_string, arg_imports = MigrationWriter.serialize(value) args = arg_string.splitlines() if len(args) > 1: self.feed("%s: %s" % (key_string, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed("%s," % args[-1]) else: self.feed("%s: %s," % (key_string, arg_string)) imports.update(key_imports) imports.update(arg_imports) self.unindent() self.feed("},") else: self.feed("%s=[" % _arg_name) self.indent() for item in _arg_value: arg_string, arg_imports = MigrationWriter.serialize(item) args = arg_string.splitlines() if len(args) > 1: for arg in args[:-1]: self.feed(arg) self.feed("%s," % args[-1]) else: self.feed("%s," % arg_string) imports.update(arg_imports) self.unindent() self.feed("],") else: arg_string, arg_imports = MigrationWriter.serialize(_arg_value) args = arg_string.splitlines() if len(args) > 1: self.feed("%s=%s" % (_arg_name, args[0])) for arg in args[1:-1]: self.feed(arg) self.feed("%s," % args[-1]) else: self.feed("%s=%s," % (_arg_name, arg_string)) imports.update(arg_imports) imports = set() name, args, kwargs = self.operation.deconstruct() operation_args = get_func_args(self.operation.__init__) # See if this operation is in django.db.migrations. If it is, # We can just use the fact we already have that imported, # otherwise, we need to add an import for the operation class. if getattr(migrations, name, None) == self.operation.__class__: self.feed("migrations.%s(" % name) else: imports.add("import %s" % (self.operation.__class__.__module__)) self.feed("%s.%s(" % (self.operation.__class__.__module__, name)) self.indent() for i, arg in enumerate(args): arg_value = arg arg_name = operation_args[i] _write(arg_name, arg_value) i = len(args) # Only iterate over remaining arguments for arg_name in operation_args[i:]: if arg_name in kwargs: # Don't sort to maintain signature order arg_value = kwargs[arg_name] _write(arg_name, arg_value) self.unindent() self.feed("),") return self.render(), imports def indent(self): self.indentation += 1 def unindent(self): self.indentation -= 1 def feed(self, line): self.buff.append(" " * (self.indentation * 4) + line) def render(self): return "\n".join(self.buff) class MigrationWriter: """ Take a Migration instance and is able to produce the contents of the migration file from it. 
""" def __init__(self, migration, include_header=True): self.migration = migration self.include_header = include_header self.needs_manual_porting = False def as_string(self): """Return a string of the file contents.""" items = { "replaces_str": "", "initial_str": "", } imports = set() # Deconstruct operations operations = [] for operation in self.migration.operations: operation_string, operation_imports = OperationWriter(operation).serialize() imports.update(operation_imports) operations.append(operation_string) items["operations"] = "\n".join(operations) + "\n" if operations else "" # Format dependencies and write out swappable dependencies right dependencies = [] for dependency in self.migration.dependencies: if dependency[0] == "__setting__": dependencies.append( " migrations.swappable_dependency(settings.%s)," % dependency[1] ) imports.add("from django.conf import settings") else: dependencies.append(" %s," % self.serialize(dependency)[0]) items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else "" # Format imports nicely, swapping imports of functions from migration files # for comments migration_imports = set() for line in list(imports): if re.match(r"^import (.*)\.\d+[^\s]*$", line): migration_imports.add(line.split("import")[1].strip()) imports.remove(line) self.needs_manual_porting = True # django.db.migrations is always used, but models import may not be. # If models import exists, merge it with migrations import. if "from django.db import models" in imports: imports.discard("from django.db import models") imports.add("from django.db import migrations, models") else: imports.add("from django.db import migrations") # Sort imports by the package / module to be imported (the part after # "from" in "from ... import ..." or after "import" in "import ..."). sorted_imports = sorted(imports, key=lambda i: i.split()[1]) items["imports"] = "\n".join(sorted_imports) + "\n" if imports else "" if migration_imports: items["imports"] += ( "\n\n# Functions from the following migrations need manual " "copying.\n# Move them and any dependencies into this file, " "then update the\n# RunPython operations to refer to the local " "versions:\n# %s" ) % "\n# ".join(sorted(migration_imports)) # If there's a replaces, make a string for it if self.migration.replaces: items["replaces_str"] = ( "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0] ) # Hinting that goes into comment if self.include_header: items["migration_header"] = MIGRATION_HEADER_TEMPLATE % { "version": get_version(), "timestamp": now().strftime("%Y-%m-%d %H:%M"), } else: items["migration_header"] = "" if self.migration.initial: items["initial_str"] = "\n initial = True\n" return MIGRATION_TEMPLATE % items @property def basedir(self): migrations_package_name, _ = MigrationLoader.migrations_module( self.migration.app_label ) if migrations_package_name is None: raise ValueError( "Django can't create migrations for app '%s' because " "migrations have been disabled via the MIGRATION_MODULES " "setting." 
% self.migration.app_label ) # See if we can import the migrations module directly try: migrations_module = import_module(migrations_package_name) except ImportError: pass else: try: return module_dir(migrations_module) except ValueError: pass # Alright, see if it's a direct submodule of the app app_config = apps.get_app_config(self.migration.app_label) ( maybe_app_name, _, migrations_package_basename, ) = migrations_package_name.rpartition(".") if app_config.name == maybe_app_name: return os.path.join(app_config.path, migrations_package_basename) # In case of using MIGRATION_MODULES setting and the custom package # doesn't exist, create one, starting from an existing package existing_dirs, missing_dirs = migrations_package_name.split("."), [] while existing_dirs: missing_dirs.insert(0, existing_dirs.pop(-1)) try: base_module = import_module(".".join(existing_dirs)) except (ImportError, ValueError): continue else: try: base_dir = module_dir(base_module) except ValueError: continue else: break else: raise ValueError( "Could not locate an appropriate location to create " "migrations package %s. Make sure the toplevel " "package exists and can be imported." % migrations_package_name ) final_dir = os.path.join(base_dir, *missing_dirs) os.makedirs(final_dir, exist_ok=True) for missing_dir in missing_dirs: base_dir = os.path.join(base_dir, missing_dir) with open(os.path.join(base_dir, "__init__.py"), "w"): pass return final_dir @property def filename(self): return "%s.py" % self.migration.name @property def path(self): return os.path.join(self.basedir, self.filename) @classmethod def serialize(cls, value): return serializer_factory(value).serialize() @classmethod def register_serializer(cls, type_, serializer): Serializer.register(type_, serializer) @classmethod def unregister_serializer(cls, type_): Serializer.unregister(type_) MIGRATION_HEADER_TEMPLATE = """\ # Generated by Django %(version)s on %(timestamp)s """ MIGRATION_TEMPLATE = """\ %(migration_header)s%(imports)s class Migration(migrations.Migration): %(replaces_str)s%(initial_str)s dependencies = [ %(dependencies)s\ ] operations = [ %(operations)s\ ] """
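# --- Usage sketch (added for illustration; not part of the file above) ---
# MigrationWriter renders a Migration instance into migration-file source
# using MIGRATION_TEMPLATE above. A minimal sketch assuming a configured
# Django project; "myapp" and the single operation are hypothetical.
from django.db import migrations, models
from django.db.migrations.writer import MigrationWriter

migration = migrations.Migration("0001_initial", "myapp")
migration.operations = [
    migrations.CreateModel(
        name="Author",
        fields=[("id", models.AutoField(primary_key=True))],
    )
]
source = MigrationWriter(migration, include_header=False).as_string()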
castiel248/Convert
Lib/site-packages/django/db/migrations/writer.py
Python
mit
11,458
from django.core.exceptions import ObjectDoesNotExist from django.db.models import signals from django.db.models.aggregates import * # NOQA from django.db.models.aggregates import __all__ as aggregates_all from django.db.models.constraints import * # NOQA from django.db.models.constraints import __all__ as constraints_all from django.db.models.deletion import ( CASCADE, DO_NOTHING, PROTECT, RESTRICT, SET, SET_DEFAULT, SET_NULL, ProtectedError, RestrictedError, ) from django.db.models.enums import * # NOQA from django.db.models.enums import __all__ as enums_all from django.db.models.expressions import ( Case, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, OrderBy, OuterRef, RowRange, Subquery, Value, ValueRange, When, Window, WindowFrame, ) from django.db.models.fields import * # NOQA from django.db.models.fields import __all__ as fields_all from django.db.models.fields.files import FileField, ImageField from django.db.models.fields.json import JSONField from django.db.models.fields.proxy import OrderWrt from django.db.models.indexes import * # NOQA from django.db.models.indexes import __all__ as indexes_all from django.db.models.lookups import Lookup, Transform from django.db.models.manager import Manager from django.db.models.query import Prefetch, QuerySet, prefetch_related_objects from django.db.models.query_utils import FilteredRelation, Q # Imports that would create circular imports if sorted from django.db.models.base import DEFERRED, Model # isort:skip from django.db.models.fields.related import ( # isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ForeignObjectRel, ManyToOneRel, ManyToManyRel, OneToOneRel, ) __all__ = aggregates_all + constraints_all + enums_all + fields_all + indexes_all __all__ += [ "ObjectDoesNotExist", "signals", "CASCADE", "DO_NOTHING", "PROTECT", "RESTRICT", "SET", "SET_DEFAULT", "SET_NULL", "ProtectedError", "RestrictedError", "Case", "Exists", "Expression", "ExpressionList", "ExpressionWrapper", "F", "Func", "OrderBy", "OuterRef", "RowRange", "Subquery", "Value", "ValueRange", "When", "Window", "WindowFrame", "FileField", "ImageField", "JSONField", "OrderWrt", "Lookup", "Transform", "Manager", "Prefetch", "Q", "QuerySet", "prefetch_related_objects", "DEFERRED", "Model", "FilteredRelation", "ForeignKey", "ForeignObject", "OneToOneField", "ManyToManyField", "ForeignObjectRel", "ManyToOneRel", "ManyToManyRel", "OneToOneRel", ]
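# --- Usage sketch (added for illustration; not part of the file above) ---
# The re-exports above are why a model definition normally needs only the
# single "from django.db import models" import. A minimal sketch assuming
# a configured Django project; "myapp" and Author are hypothetical.
from django.db import models

class Author(models.Model):
    name = models.CharField(max_length=100)

    class Meta:
        app_label = "myapp"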
castiel248/Convert
Lib/site-packages/django/db/models/__init__.py
Python
mit
2,774
""" Classes to represent the definitions of aggregate functions. """ from django.core.exceptions import FieldError, FullResultSet from django.db.models.expressions import Case, Func, Star, Value, When from django.db.models.fields import IntegerField from django.db.models.functions.comparison import Coalesce from django.db.models.functions.mixins import ( FixDurationInputMixin, NumericOutputFieldMixin, ) __all__ = [ "Aggregate", "Avg", "Count", "Max", "Min", "StdDev", "Sum", "Variance", ] class Aggregate(Func): template = "%(function)s(%(distinct)s%(expressions)s)" contains_aggregate = True name = None filter_template = "%s FILTER (WHERE %%(filter)s)" window_compatible = True allow_distinct = False empty_result_set_value = None def __init__( self, *expressions, distinct=False, filter=None, default=None, **extra ): if distinct and not self.allow_distinct: raise TypeError("%s does not allow distinct." % self.__class__.__name__) if default is not None and self.empty_result_set_value is not None: raise TypeError(f"{self.__class__.__name__} does not allow default.") self.distinct = distinct self.filter = filter self.default = default super().__init__(*expressions, **extra) def get_source_fields(self): # Don't return the filter expression since it's not a source field. return [e._output_field_or_none for e in super().get_source_expressions()] def get_source_expressions(self): source_expressions = super().get_source_expressions() if self.filter: return source_expressions + [self.filter] return source_expressions def set_source_expressions(self, exprs): self.filter = self.filter and exprs.pop() return super().set_source_expressions(exprs) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Aggregates are not allowed in UPDATE queries, so ignore for_save c = super().resolve_expression(query, allow_joins, reuse, summarize) c.filter = c.filter and c.filter.resolve_expression( query, allow_joins, reuse, summarize ) if summarize: # Summarized aggregates cannot refer to summarized aggregates. for ref in c.get_refs(): if query.annotations[ref].is_summary: raise FieldError( f"Cannot compute {c.name}('{ref}'): '{ref}' is an aggregate" ) elif not self.is_summary: # Call Aggregate.get_source_expressions() to avoid # returning self.filter and including that in this loop. expressions = super(Aggregate, c).get_source_expressions() for index, expr in enumerate(expressions): if expr.contains_aggregate: before_resolved = self.get_source_expressions()[index] name = ( before_resolved.name if hasattr(before_resolved, "name") else repr(before_resolved) ) raise FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name) ) if (default := c.default) is None: return c if hasattr(default, "resolve_expression"): default = default.resolve_expression(query, allow_joins, reuse, summarize) if default._output_field_or_none is None: default.output_field = c._output_field_or_none else: default = Value(default, c._output_field_or_none) c.default = None # Reset the default argument before wrapping. 
coalesce = Coalesce(c, default, output_field=c._output_field_or_none) coalesce.is_summary = c.is_summary return coalesce @property def default_alias(self): expressions = self.get_source_expressions() if len(expressions) == 1 and hasattr(expressions[0], "name"): return "%s__%s" % (expressions[0].name, self.name.lower()) raise TypeError("Complex expressions require an alias") def get_group_by_cols(self): return [] def as_sql(self, compiler, connection, **extra_context): extra_context["distinct"] = "DISTINCT " if self.distinct else "" if self.filter: if connection.features.supports_aggregate_filter_clause: try: filter_sql, filter_params = self.filter.as_sql(compiler, connection) except FullResultSet: pass else: template = self.filter_template % extra_context.get( "template", self.template ) sql, params = super().as_sql( compiler, connection, template=template, filter=filter_sql, **extra_context, ) return sql, (*params, *filter_params) else: copy = self.copy() copy.filter = None source_expressions = copy.get_source_expressions() condition = When(self.filter, then=source_expressions[0]) copy.set_source_expressions([Case(condition)] + source_expressions[1:]) return super(Aggregate, copy).as_sql( compiler, connection, **extra_context ) return super().as_sql(compiler, connection, **extra_context) def _get_repr_options(self): options = super()._get_repr_options() if self.distinct: options["distinct"] = self.distinct if self.filter: options["filter"] = self.filter return options class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate): function = "AVG" name = "Avg" allow_distinct = True class Count(Aggregate): function = "COUNT" name = "Count" output_field = IntegerField() allow_distinct = True empty_result_set_value = 0 def __init__(self, expression, filter=None, **extra): if expression == "*": expression = Star() if isinstance(expression, Star) and filter is not None: raise ValueError("Star cannot be used with filter. Please specify a field.") super().__init__(expression, filter=filter, **extra) class Max(Aggregate): function = "MAX" name = "Max" class Min(Aggregate): function = "MIN" name = "Min" class StdDev(NumericOutputFieldMixin, Aggregate): name = "StdDev" def __init__(self, expression, sample=False, **extra): self.function = "STDDEV_SAMP" if sample else "STDDEV_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "STDDEV_SAMP"} class Sum(FixDurationInputMixin, Aggregate): function = "SUM" name = "Sum" allow_distinct = True class Variance(NumericOutputFieldMixin, Aggregate): name = "Variance" def __init__(self, expression, sample=False, **extra): self.function = "VAR_SAMP" if sample else "VAR_POP" super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), "sample": self.function == "VAR_SAMP"}
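# --- Usage sketch (added for illustration; not part of the file above) ---
# Typical aggregate usage through QuerySet.aggregate(); Book and its
# fields are hypothetical, so the call is left commented. `default` wraps
# the aggregate in Coalesce (see resolve_expression above), and `filter`
# becomes a FILTER clause or a CASE expression depending on the backend.
from django.db.models import Avg, Count, Q, Sum

# Book.objects.aggregate(
#     n_titles=Count("title", distinct=True),
#     total_pages=Sum("pages", default=0),
#     avg_recent=Avg("rating", filter=Q(year__gte=2020)),
# )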
castiel248/Convert
Lib/site-packages/django/db/models/aggregates.py
Python
mit
7,642
import copy import inspect import warnings from functools import partialmethod from itertools import chain from asgiref.sync import sync_to_async import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.expressions import RawSQL from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import AltersData, make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return "<Deferred field>" def __str__(self): return "<Deferred field>" DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type( name, bases, { "__module__": module, "__qualname__": "%s.%s" % (attached_to.__qualname__, name), }, ) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, "contribute_to_class") class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop("__module__") new_attrs = {"__module__": module} classcell = attrs.pop("__classcell__", None) if classcell is not None: new_attrs["__classcell__"] = classcell attr_meta = attrs.pop("Meta", None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, "abstract", False) meta = attr_meta or getattr(new_class, "Meta", None) base_meta = getattr(new_class, "_meta", None) app_label = None # Look for an application configuration to attach the model to. 
app_config = apps.get_containing_app_config(module) if getattr(meta, "app_label", None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class("_meta", Options(meta, app_label)) if not abstract: new_class.add_to_class( "DoesNotExist", subclass_exception( "DoesNotExist", tuple( x.DoesNotExist for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class, ), ) new_class.add_to_class( "MultipleObjectsReturned", subclass_exception( "MultipleObjectsReturned", tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, "_meta") and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class, ), ) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, "ordering"): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, "get_latest_by"): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError( "%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped) ) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields, ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, "_meta")]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError( "Proxy model '%s' has more than one non-abstract model base " "class." % name ) if base is None: raise TypeError( "Proxy model '%s' has no non-abstract model base class." % name ) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, "_meta"): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, "_meta"): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = "%s_ptr" % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if ( field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes ): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( "Local field %r in class %r clashes with field of " "the same name from base class %r." % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [ copy.deepcopy(idx) for idx in new_class._meta.indexes ] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=True ) cls.get_previous_in_order = partialmethod( cls._get_next_or_previous_in_order, is_next=False ) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % ( cls.__name__, ", ".join(f.name for f in opts.fields), ) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get( opts.label_lower ) if get_absolute_url_override: setattr(cls, "get_absolute_url", get_absolute_url_override) if not opts.managers: if any(f.name == "objects" for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class("objects", manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(AltersData, metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED if opts.abstract: raise TypeError("Abstract models cannot be instantiated.") pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: raise TypeError( f"{cls.__qualname__}() got both positional and " f"keyword arguments for field '{field.name}'." 
) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names unexpected = () for prop, value in kwargs.items(): # Any remaining kwargs must correspond to properties or virtual # fields. if prop in property_names: if value is not _DEFERRED: _setattr(self, prop, value) else: try: opts.get_field(prop) except FieldDoesNotExist: unexpected += (prop,) else: if value is not _DEFERRED: _setattr(self, prop, value) if unexpected: unexpected_names = ", ".join(repr(n) for n in unexpected) raise TypeError( f"{cls.__name__}() got unexpected keyword arguments: " f"{unexpected_names}" ) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __str__(self): return "%s object (%s)" % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" state = self.__dict__.copy() state["_state"] = copy.copy(state["_state"]) state["_state"].fields_cache = state["_state"].fields_cache.copy() # memoryview cannot be pickled, so cast it to bytes and store # separately. 
_memoryview_attrs = [] for attr, value in state.items(): if isinstance(value, memoryview): _memoryview_attrs.append((attr, bytes(value))) if _memoryview_attrs: state["_memoryview_attrs"] = _memoryview_attrs for attr, value in _memoryview_attrs: state.pop(attr) return state def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." % (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) if "_memoryview_attrs" in state: for attr, value in state.pop("_memoryview_attrs"): state[attr] = memoryview(value) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, "_prefetched_objects_cache", ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' "are not allowed in fields." % LOOKUP_SEP ) hints = {"instance": self} db_instance_qs = self.__class__._base_manager.db_manager( using, hints=hints ).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [ f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields ] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) # Clear cached private relations. 
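# (Illustrative note: private fields cover relations such as
# GenericForeignKey, which keep their own per-instance cache and are
# therefore cleared separately below.)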
for field in self._meta.private_fields: if field.is_relation and field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db async def arefresh_from_db(self, using=None, fields=None): return await sync_to_async(self.refresh_from_db)(using=using, fields=fields) def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save( self, force_insert=False, force_update=False, using=None, update_fields=None ): """ Save the current instance. Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ self._prepare_related_fields_for_save(operation_name="save") using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = self._meta._non_pk_concrete_field_names non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError( "The following fields do not exist in this model, are m2m " "fields, or are non-concrete fields: %s" % ", ".join(non_model_fields) ) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, "through"): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base( using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields, ) save.alters_data = True async def asave( self, force_insert=False, force_update=False, using=None, update_fields=None ): return await sync_to_async(self.save)( force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields, ) asave.alters_data = True def save_base( self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. 
""" using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if ( field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None ): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents( cls=parent, using=using, update_fields=update_fields ) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table( self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None, ): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [ f for f in non_pks if f.name in update_fields or f.attname in update_fields ] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [ ( f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)), ) for f in non_pks ] forced_update = update_fields or force_update updated = self._do_update( base_qs, using, pk_val, values, update_fields, forced_update ) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = ( cls._base_manager.using(using) .filter(**filter_args) .aggregate( _order__max=Coalesce( ExpressionWrapper( Max("_order") + Value(1), output_field=IntegerField() ), Value(0), ), )["_order__max"] ) fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert( cls._base_manager, using, fields, returning_fields, raw ) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def _prepare_related_fields_for_save(self, operation_name, fields=None): # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey, GenericForeignKey or OneToOneField on this model. If # the field is nullable, allowing the save would result in silent data # loss. for field in self._meta.concrete_fields: if fields and field not in fields: continue # If the related field isn't cached, then an instance hasn't been # assigned and there's no need to worry about this check. 
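# Illustration (hypothetical models): ``book = Book(author=Author())`` caches
# an unsaved Author instance on ``book``; ``book.save()`` then takes the
# ``obj.pk is None`` branch below and raises ValueError rather than silently
# persisting ``author_id = NULL``.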
if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "%s() prohibited to prevent data loss due to unsaved " "related object '%s'." % (operation_name, field.name) ) elif getattr(self, field.attname) in field.empty_values: # Set related object if it has been saved after an # assignment. setattr(self, field.name, obj) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr( self, field.attname ): field.delete_cached_value(self) # GenericForeignKeys are private. for field in self._meta.private_fields: if fields and field not in fields: continue if ( field.is_relation and field.is_cached(self) and hasattr(field, "fk_field") ): obj = field.get_cached_value(self, default=None) if obj and obj.pk is None: raise ValueError( f"{operation_name}() prohibited to prevent data loss due to " f"unsaved related object '{field.name}'." ) def delete(self, using=None, keep_parents=False): if self.pk is None: raise ValueError( "%s object can't be deleted because its %s attribute is set " "to None." % (self._meta.object_name, self._meta.pk.attname) ) using = using or router.db_for_write(self.__class__, instance=self) collector = Collector(using=using, origin=self) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True async def adelete(self, using=None, keep_parents=False): return await sync_to_async(self.delete)( using=using, keep_parents=keep_parents, ) adelete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. return force_str( choices_dict.get(make_hashable(value), value), strings_only=True ) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = "gt" if is_next else "lt" order = "" if is_next else "-" param = getattr(self, field.attname) q = Q.create([(field.name, param), (f"pk__{op}", self.pk)], connector=Q.AND) q = Q.create([q, (f"{field.name}__{op}", param)], connector=Q.OR) qs = ( self.__class__._default_manager.using(self._state.db) .filter(**kwargs) .filter(q) .order_by("%s%s" % (order, field.name), "%spk" % order) ) try: return qs[0] except IndexError: raise self.DoesNotExist( "%s matching query does not exist." 
% self.__class__._meta.object_name ) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = "gt" if is_next else "lt" order = "_order" if is_next else "-_order" order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = ( self.__class__._default_manager.filter(**filter_args) .filter( **{ "_order__%s" % op: self.__class__._default_manager.values("_order").filter( **{self._meta.pk.name: self.pk} ) } ) .order_by(order)[:1] .get() ) setattr(self, cachename, obj) return getattr(self, cachename) def _get_field_value_map(self, meta, exclude=None): if exclude is None: exclude = set() meta = meta or self._meta return { field.name: Value(getattr(self, field.attname), field) for field in meta.local_concrete_fields if field.name not in exclude } def prepare_database_save(self, field): if self.pk is None: raise ValueError( "Unsaved model instance %r cannot be used in an ORM query." % self ) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None, include_meta_constraints=False): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = set() unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [] if include_meta_constraints: constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append( (parent_class, parent_class._meta.unique_together) ) if include_meta_constraints and parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) if include_meta_constraints: for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. 
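# Illustration: a field declared with ``unique=True`` contributes a
# ``(model_class, (field_name,))`` entry to unique_checks, while
# ``unique_for_date`` adds a ``(model_class, "date", field_name, date_field)``
# tuple to date_checks; both shapes match the loops below.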
fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, "date", name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, "year", name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, "month", name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if lookup_value is None or ( lookup_value == "" and connection.features.interprets_empty_strings_as_nulls ): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append( self.unique_error_message(model_class, unique_check) ) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == "date": lookup_kwargs["%s__day" % unique_for] = date.day lookup_kwargs["%s__month" % unique_for] = date.month lookup_kwargs["%s__year" % unique_for] = date.year else: lookup_kwargs["%s__%s" % (unique_for, lookup_type)] = getattr( date, lookup_type ) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages["unique_for_date"], code="unique_for_date", params={ "model": self, "model_name": capfirst(opts.verbose_name), "lookup_type": lookup_type, "field": field_name, "field_label": capfirst(field.verbose_name), "date_field": unique_for, "date_field_label": capfirst(opts.get_field(unique_for).verbose_name), }, ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { "model": self, "model_class": model_class, "model_name": capfirst(opts.verbose_name), "unique_check": unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params["field_label"] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages["unique"], code="unique", params=params, ) # unique_together else: field_labels = [ capfirst(opts.get_field(f).verbose_name) for f in unique_check ] params["field_labels"] = get_text_list(field_labels, _("and")) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code="unique_together", params=params, ) def get_constraints(self): constraints = [(self.__class__, self._meta.constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.constraints: constraints.append((parent_class, parent_class._meta.constraints)) return constraints def validate_constraints(self, exclude=None): constraints = self.get_constraints() using = router.db_for_write(self.__class__, instance=self) errors = {} for model_class, model_constraints in constraints: for constraint in model_constraints: try: constraint.validate(model_class, self, exclude=exclude, using=using) except ValidationError as e: if ( getattr(e, "code", None) == "unique" and len(constraint.fields) == 1 ): errors.setdefault(constraint.fields[0], []).append(e) else: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def full_clean(self, exclude=None, validate_unique=True, validate_constraints=True): """ Call clean_fields(), clean(), validate_unique(), and validate_constraints() on the model. 
Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = set() else: exclude = set(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Run constraints checks, but only for fields that passed validation. if validate_constraints: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.add(name) try: self.validate_constraints(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = set() errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [ *cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs), ] if not cls._meta.swapped: databases = kwargs.get("databases") or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), *cls._check_default_pk(), *cls._check_db_table_comment(databases), ] return errors @classmethod def _check_default_pk(cls): if ( not cls._meta.abstract and cls._meta.pk.auto_created and # Inherited PKs are checked in parents models. not ( isinstance(cls._meta.pk, OneToOneField) and cls._meta.pk.remote_field.parent_link ) and not settings.is_overridden("DEFAULT_AUTO_FIELD") and cls._meta.app_config and not cls._meta.app_config._is_default_auto_field_overridden ): return [ checks.Warning( f"Auto-created primary key used when not defining a " f"primary key type, by default " f"'{settings.DEFAULT_AUTO_FIELD}'.", hint=( f"Configure the DEFAULT_AUTO_FIELD setting or the " f"{cls._meta.app_config.__class__.__qualname__}." f"default_auto_field attribute to point to a subclass " f"of AutoField, e.g. 'django.db.models.BigAutoField'." 
), obj=cls, id="models.W042", ), ] return [] @classmethod def _check_db_table_comment(cls, databases): if not cls._meta.db_table_comment: return [] errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_comments or "supports_comments" in cls._meta.required_db_features ): errors.append( checks.Warning( f"{connection.display_name} does not support comments on " f"tables (db_table_comment).", obj=cls, id="models.W046", ) ) return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id="models.E001", ) ) except LookupError: app_label, model_name = cls._meta.swapped.split(".") errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % (cls._meta.swappable, app_label, model_name), id="models.E002", ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id="models.E017", ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """Check if no relationship model is used by more than one m2m field.""" errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = ( f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields, ) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id="models.E003", ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [ f for f in cls._meta.local_fields if f.name == "id" and f != cls._meta.pk ] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == "id": return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id="models.E004", ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. 
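# Illustration: given ``class Child(ParentA, ParentB)`` where both concrete
# parents declare a field named ``created``, the loop below reports
# models.E005 for the clashing parent fields.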
for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % (clash.name, clash.model._meta, f.name, f.model._meta), obj=cls, id="models.E005", ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = ( f.name == "id" and clash and clash.name == "id" and clash.model == cls ) if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % (f.name, clash.name, clash.model._meta), obj=f, id="models.E006", ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id="models.E007", ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith("_") or model_name.endswith("_"): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." % model_name, obj=cls, id="models.E023", ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id="models.E024", ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id="models.E025", ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id="models.E026", ) ) return errors # RemovedInDjango51Warning. 
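# (Meta.index_together is deprecated; this check is expected to disappear
# along with the option itself when the RemovedInDjango51Warning
# deprecations land.)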
@classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id="models.E008", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together ): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id="models.E009", ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id="models.E010", ) ] elif any( not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together ): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id="models.E011", ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] references = set() for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == "_" or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id="models.E033", ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id="models.E034", ), ) if index.contains_expressions: for expression in index.expressions: references.update( ref[0] for ref in cls._get_expr_references(expression) ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with conditions." % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." ), obj=cls, id="models.W037", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes with non-key columns." % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W040", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any(index.contains_expressions for index in cls._meta.indexes): errors.append( checks.Warning( "%s does not support indexes on expressions." % connection.display_name, hint=( "An index won't be created. Silence this warning " "if you don't care about it." 
), obj=cls, id="models.W043", ) ) fields = [ field for index in cls._meta.indexes for field, _ in index.fields_orders ] fields += [include for index in cls._meta.indexes for include in index.include] fields += references errors.extend(cls._check_local_fields(fields, "indexes")) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, "attname"): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id="models.E012", ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id="models.E013", ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model " "'%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id="models.E016", ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id="models.E021", ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by " "only one field).", obj=cls, id="models.E014", ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != "?") # Convert "-field" to "field". fields = ((f[1:] if f.startswith("-") else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == "pk": fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.path_infos[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id="models.E015", ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != "pk"} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
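# Illustration: ``ordering = ["tags"]`` where ``tags`` is a ManyToManyField
# is reported as models.E015 below, because m2m names are excluded from
# valid_fields.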
opts = cls._meta valid_fields = set( chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) ) ) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id="models.E015", ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if ( f.db_column is None and column_name is not None and len(column_name) > allowed_len ): errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id="models.E018", ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if ( m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len ): errors.append( checks.Error( "Autogenerated column name too long for M2M field " '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id="models.E019", ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, "get_source_expressions"): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support check constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id="models.W027", ) ) if not ( connection.features.supports_partial_indexes or "supports_partial_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with " "conditions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W036", ) ) if not ( connection.features.supports_deferrable_unique_constraints or "supports_deferrable_unique_constraints" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support deferrable unique constraints." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W038", ) ) if not ( connection.features.supports_covering_indexes or "supports_covering_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints with non-key " "columns." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W039", ) ) if not ( connection.features.supports_expression_indexes or "supports_expression_indexes" in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.contains_expressions for constraint in cls._meta.constraints ): errors.append( checks.Warning( "%s does not support unique constraints on " "expressions." % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id="models.W044", ) ) fields = set( chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) ) ) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or "supports_partial_indexes" not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update( cls._get_expr_references(constraint.condition) ) if ( connection.features.supports_expression_indexes or "supports_expression_indexes" not in cls._meta.required_db_features ) and constraint.contains_expressions: for expression in constraint.expressions: references.update(cls._get_expr_references(expression)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or "supports_table_check_constraints" not in cls._meta.required_db_features ): if isinstance(constraint.check, Q): references.update( cls._get_expr_references(constraint.check) ) if any( isinstance(expr, RawSQL) for expr in constraint.check.flatten() ): errors.append( checks.Warning( f"Check constraint {constraint.name!r} contains " f"RawSQL() expression and won't be validated " f"during the model full_clean().", hint=( "Silence this warning if you don't care about " "it." ), obj=cls, id="models.W045", ), ) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. 
if field_name != "pk": fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == "pk": field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. first_lookup = lookups[0] if ( hasattr(field, "get_transform") and hasattr(field, "get_lookup") and field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id="models.E041", ) ) errors.extend(cls._check_local_fields(fields, "constraints")) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update( [ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list)], ["_order"], ) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, "get_%s_order" % model.__name__.lower(), partialmethod(method_get_order, model), ) setattr( related_model, "set_%s_order" % model.__name__.lower(), partialmethod(method_set_order, model), ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
castiel248/Convert
Lib/site-packages/django/db/models/base.py
Python
mit
100,332
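The tail of base.py above defines the curried ordering helpers (method_get_order(), method_set_order(), make_foreign_order_accessors()) that Django wires up when a model declares Meta.order_with_respect_to. A minimal sketch of the generated accessors, assuming a configured Django project; the Question/Answer models are hypothetical:

from django.db import models


class Question(models.Model):
    text = models.CharField(max_length=200)


class Answer(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    text = models.CharField(max_length=200)

    class Meta:
        # Adds a hidden _order column and triggers
        # make_foreign_order_accessors(Answer, Question).
        order_with_respect_to = "question"


# Accessors generated on the related model:
#   question.get_answer_order()           # queryset of Answer pks, in _order
#   question.set_answer_order([3, 1, 2])  # bulk-updates _order via method_set_order()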
""" Constants used across the ORM in general. """ from enum import Enum # Separator used to split filter strings apart. LOOKUP_SEP = "__" class OnConflict(Enum): IGNORE = "ignore" UPDATE = "update"
castiel248/Convert
Lib/site-packages/django/db/models/constants.py
Python
mit
209
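constants.py above is tiny but load-bearing: LOOKUP_SEP is the double-underscore separator the ORM uses to split filter strings into a relation path plus a trailing lookup or transform. A small illustration; the author__name field path is hypothetical:

from django.db.models.constants import LOOKUP_SEP

# "author__name__icontains" traverses the author relation, picks the
# name field, then applies the icontains lookup.
parts = "author__name__icontains".split(LOOKUP_SEP)
assert parts == ["author", "name", "icontains"]
*field_path, lookup = parts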
from enum import Enum from django.core.exceptions import FieldError, ValidationError from django.db import connections from django.db.models.expressions import Exists, ExpressionList, F, OrderBy from django.db.models.indexes import IndexExpression from django.db.models.lookups import Exact from django.db.models.query_utils import Q from django.db.models.sql.query import Query from django.db.utils import DEFAULT_DB_ALIAS from django.utils.translation import gettext_lazy as _ __all__ = ["BaseConstraint", "CheckConstraint", "Deferrable", "UniqueConstraint"] class BaseConstraint: default_violation_error_message = _("Constraint “%(name)s” is violated.") violation_error_message = None def __init__(self, name, violation_error_message=None): self.name = name if violation_error_message is not None: self.violation_error_message = violation_error_message else: self.violation_error_message = self.default_violation_error_message @property def contains_expressions(self): return False def constraint_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def create_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def remove_sql(self, model, schema_editor): raise NotImplementedError("This method must be implemented by a subclass.") def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): raise NotImplementedError("This method must be implemented by a subclass.") def get_violation_error_message(self): return self.violation_error_message % {"name": self.name} def deconstruct(self): path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) path = path.replace("django.db.models.constraints", "django.db.models") kwargs = {"name": self.name} if ( self.violation_error_message is not None and self.violation_error_message != self.default_violation_error_message ): kwargs["violation_error_message"] = self.violation_error_message return (path, (), kwargs) def clone(self): _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) class CheckConstraint(BaseConstraint): def __init__(self, *, check, name, violation_error_message=None): self.check = check if not getattr(check, "conditional", False): raise TypeError( "CheckConstraint.check must be a Q instance or boolean expression." 
) super().__init__(name, violation_error_message=violation_error_message) def _get_check_sql(self, model, schema_editor): query = Query(model=model, alias_cols=False) where = query.build_where(self.check) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def constraint_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._check_sql(self.name, check) def create_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._create_check_sql(model, self.name, check) def remove_sql(self, model, schema_editor): return schema_editor._delete_check_sql(model, self.name) def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): against = instance._get_field_value_map(meta=model._meta, exclude=exclude) try: if not Q(self.check).check(against, using=using): raise ValidationError(self.get_violation_error_message()) except FieldError: pass def __repr__(self): return "<%s: check=%s name=%s>" % ( self.__class__.__qualname__, self.check, repr(self.name), ) def __eq__(self, other): if isinstance(other, CheckConstraint): return ( self.name == other.name and self.check == other.check and self.violation_error_message == other.violation_error_message ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() kwargs["check"] = self.check return path, args, kwargs class Deferrable(Enum): DEFERRED = "deferred" IMMEDIATE = "immediate" # A similar format was proposed for Python 3.10. def __repr__(self): return f"{self.__class__.__qualname__}.{self._name_}" class UniqueConstraint(BaseConstraint): def __init__( self, *expressions, fields=(), name=None, condition=None, deferrable=None, include=None, opclasses=(), violation_error_message=None, ): if not name: raise ValueError("A unique constraint must be named.") if not expressions and not fields: raise ValueError( "At least one field or expression is required to define a " "unique constraint." ) if expressions and fields: raise ValueError( "UniqueConstraint.fields and expressions are mutually exclusive." ) if not isinstance(condition, (type(None), Q)): raise ValueError("UniqueConstraint.condition must be a Q instance.") if condition and deferrable: raise ValueError("UniqueConstraint with conditions cannot be deferred.") if include and deferrable: raise ValueError("UniqueConstraint with include fields cannot be deferred.") if opclasses and deferrable: raise ValueError("UniqueConstraint with opclasses cannot be deferred.") if expressions and deferrable: raise ValueError("UniqueConstraint with expressions cannot be deferred.") if expressions and opclasses: raise ValueError( "UniqueConstraint.opclasses cannot be used with expressions. " "Use django.contrib.postgres.indexes.OpClass() instead." ) if not isinstance(deferrable, (type(None), Deferrable)): raise ValueError( "UniqueConstraint.deferrable must be a Deferrable instance." ) if not isinstance(include, (type(None), list, tuple)): raise ValueError("UniqueConstraint.include must be a list or tuple.") if not isinstance(opclasses, (list, tuple)): raise ValueError("UniqueConstraint.opclasses must be a list or tuple.") if opclasses and len(fields) != len(opclasses): raise ValueError( "UniqueConstraint.fields and UniqueConstraint.opclasses must " "have the same number of elements." 
) self.fields = tuple(fields) self.condition = condition self.deferrable = deferrable self.include = tuple(include) if include else () self.opclasses = opclasses self.expressions = tuple( F(expression) if isinstance(expression, str) else expression for expression in expressions ) super().__init__(name, violation_error_message=violation_error_message) @property def contains_expressions(self): return bool(self.expressions) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def _get_index_expressions(self, model, schema_editor): if not self.expressions: return None index_expressions = [] for expression in self.expressions: index_expression = IndexExpression(expression) index_expression.set_wrapper_classes(schema_editor.connection) index_expressions.append(index_expression) return ExpressionList(*index_expressions).resolve_expression( Query(model, alias_cols=False), ) def constraint_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [ model._meta.get_field(field_name).column for field_name in self.include ] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def create_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [ model._meta.get_field(field_name).column for field_name in self.include ] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._create_unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def remove_sql(self, model, schema_editor): condition = self._get_condition_sql(model, schema_editor) include = [ model._meta.get_field(field_name).column for field_name in self.include ] expressions = self._get_index_expressions(model, schema_editor) return schema_editor._delete_unique_sql( model, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def __repr__(self): return "<%s:%s%s%s%s%s%s%s>" % ( self.__class__.__qualname__, "" if not self.fields else " fields=%s" % repr(self.fields), "" if not self.expressions else " expressions=%s" % repr(self.expressions), " name=%s" % repr(self.name), "" if self.condition is None else " condition=%s" % self.condition, "" if self.deferrable is None else " deferrable=%r" % self.deferrable, "" if not self.include else " include=%s" % repr(self.include), "" if not self.opclasses else " opclasses=%s" % repr(self.opclasses), ) def __eq__(self, other): if isinstance(other, UniqueConstraint): return ( self.name == other.name and self.fields == other.fields and self.condition == other.condition and self.deferrable == other.deferrable and self.include == other.include and self.opclasses == other.opclasses and self.expressions == other.expressions and self.violation_error_message == 
other.violation_error_message ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() if self.fields: kwargs["fields"] = self.fields if self.condition: kwargs["condition"] = self.condition if self.deferrable: kwargs["deferrable"] = self.deferrable if self.include: kwargs["include"] = self.include if self.opclasses: kwargs["opclasses"] = self.opclasses return path, self.expressions, kwargs def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS): queryset = model._default_manager.using(using) if self.fields: lookup_kwargs = {} for field_name in self.fields: if exclude and field_name in exclude: return field = model._meta.get_field(field_name) lookup_value = getattr(instance, field.attname) if lookup_value is None or ( lookup_value == "" and connections[using].features.interprets_empty_strings_as_nulls ): # A composite constraint containing NULL value cannot cause # a violation since NULL != NULL in SQL. return lookup_kwargs[field.name] = lookup_value queryset = queryset.filter(**lookup_kwargs) else: # Ignore constraints with excluded fields. if exclude: for expression in self.expressions: if hasattr(expression, "flatten"): for expr in expression.flatten(): if isinstance(expr, F) and expr.name in exclude: return elif isinstance(expression, F) and expression.name in exclude: return replacements = { F(field): value for field, value in instance._get_field_value_map( meta=model._meta, exclude=exclude ).items() } expressions = [] for expr in self.expressions: # Ignore ordering. if isinstance(expr, OrderBy): expr = expr.expression expressions.append(Exact(expr, expr.replace_expressions(replacements))) queryset = queryset.filter(*expressions) model_class_pk = instance._get_pk_val(model._meta) if not instance._state.adding and model_class_pk is not None: queryset = queryset.exclude(pk=model_class_pk) if not self.condition: if queryset.exists(): if self.expressions: raise ValidationError(self.get_violation_error_message()) # When fields are defined, use the unique_error_message() for # backward compatibility. for model, constraints in instance.get_constraints(): for constraint in constraints: if constraint is self: raise ValidationError( instance.unique_error_message(model, self.fields) ) else: against = instance._get_field_value_map(meta=model._meta, exclude=exclude) try: if (self.condition & Exists(queryset.filter(self.condition))).check( against, using=using ): raise ValidationError(self.get_violation_error_message()) except FieldError: pass
castiel248/Convert
Lib/site-packages/django/db/models/constraints.py
Python
mit
15,389
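A usage sketch of the constraint classes defined above, declared through Meta.constraints; the Product model and its fields are hypothetical. The conditional UniqueConstraint is exactly the shape the models.W036 system check in base.py warns about on backends without partial-index support:

from django.db import models
from django.db.models import CheckConstraint, Q, UniqueConstraint


class Product(models.Model):
    name = models.CharField(max_length=100)
    price = models.DecimalField(max_digits=8, decimal_places=2)
    discounted_price = models.DecimalField(
        max_digits=8, decimal_places=2, null=True
    )

    class Meta:
        constraints = [
            # Enforced by the database and checked by
            # CheckConstraint.validate() during full_clean().
            CheckConstraint(
                check=Q(price__gt=0),
                name="price_positive",
                violation_error_message="Price must be positive.",
            ),
            # Partial (conditional) unique constraint.
            UniqueConstraint(
                fields=["name"],
                condition=Q(discounted_price__isnull=False),
                name="unique_discounted_name",
            ),
        ]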
from collections import Counter, defaultdict from functools import partial, reduce from itertools import chain from operator import attrgetter, or_ from django.db import IntegrityError, connections, models, transaction from django.db.models import query_utils, signals, sql class ProtectedError(IntegrityError): def __init__(self, msg, protected_objects): self.protected_objects = protected_objects super().__init__(msg, protected_objects) class RestrictedError(IntegrityError): def __init__(self, msg, restricted_objects): self.restricted_objects = restricted_objects super().__init__(msg, restricted_objects) def CASCADE(collector, field, sub_objs, using): collector.collect( sub_objs, source=field.remote_field.model, source_attr=field.name, nullable=field.null, fail_on_restricted=False, ) if field.null and not connections[using].features.can_defer_constraint_checks: collector.add_field_update(field, None, sub_objs) def PROTECT(collector, field, sub_objs, using): raise ProtectedError( "Cannot delete some instances of model '%s' because they are " "referenced through a protected foreign key: '%s.%s'" % ( field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name, ), sub_objs, ) def RESTRICT(collector, field, sub_objs, using): collector.add_restricted_objects(field, sub_objs) collector.add_dependency(field.remote_field.model, field.model) def SET(value): if callable(value): def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value(), sub_objs) else: def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value, sub_objs) set_on_delete.deconstruct = lambda: ("django.db.models.SET", (value,), {}) set_on_delete.lazy_sub_objs = True return set_on_delete def SET_NULL(collector, field, sub_objs, using): collector.add_field_update(field, None, sub_objs) SET_NULL.lazy_sub_objs = True def SET_DEFAULT(collector, field, sub_objs, using): collector.add_field_update(field, field.get_default(), sub_objs) SET_DEFAULT.lazy_sub_objs = True def DO_NOTHING(collector, field, sub_objs, using): pass def get_candidate_relations_to_delete(opts): # The candidate relations are the ones that come from N-1 and 1-1 relations. # N-N (i.e., many-to-many) relations aren't candidates for deletion. return ( f for f in opts.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) ) class Collector: def __init__(self, using, origin=None): self.using = using # A Model or QuerySet object. self.origin = origin # Initially, {model: {instances}}, later values become lists. self.data = defaultdict(set) # {(field, value): [instances, …]} self.field_updates = defaultdict(list) # {model: {field: {instances}}} self.restricted_objects = defaultdict(partial(defaultdict, set)) # fast_deletes is a list of queryset-likes that can be deleted without # fetching the objects into memory. self.fast_deletes = [] # Tracks deletion-order dependency for databases without transactions # or ability to defer constraint checks. Only concrete model classes # should be included, as the dependencies exist only between actual # database tables; proxy models are represented here by their concrete # parent. self.dependencies = defaultdict(set) # {model: {models}} def add(self, objs, source=None, nullable=False, reverse_dependency=False): """ Add 'objs' to the collection of objects to be deleted. 
If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected. """ if not objs: return [] new_objs = [] model = objs[0].__class__ instances = self.data[model] for obj in objs: if obj not in instances: new_objs.append(obj) instances.update(new_objs) # Nullable relationships can be ignored -- they are nulled out before # deleting, and therefore do not affect the order in which objects have # to be deleted. if source is not None and not nullable: self.add_dependency(source, model, reverse_dependency=reverse_dependency) return new_objs def add_dependency(self, model, dependency, reverse_dependency=False): if reverse_dependency: model, dependency = dependency, model self.dependencies[model._meta.concrete_model].add( dependency._meta.concrete_model ) self.data.setdefault(dependency, self.data.default_factory()) def add_field_update(self, field, value, objs): """ Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). """ self.field_updates[field, value].append(objs) def add_restricted_objects(self, field, objs): if objs: model = objs[0].__class__ self.restricted_objects[model][field].update(objs) def clear_restricted_objects_from_set(self, model, objs): if model in self.restricted_objects: self.restricted_objects[model] = { field: items - objs for field, items in self.restricted_objects[model].items() } def clear_restricted_objects_from_queryset(self, model, qs): if model in self.restricted_objects: objs = set( qs.filter( pk__in=[ obj.pk for objs in self.restricted_objects[model].values() for obj in objs ] ) ) self.clear_restricted_objects_from_set(model, objs) def _has_signal_listeners(self, model): return signals.pre_delete.has_listeners( model ) or signals.post_delete.has_listeners(model) def can_fast_delete(self, objs, from_field=None): """ Determine if the objects in the given queryset-like or single object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child. """ if from_field and from_field.remote_field.on_delete is not CASCADE: return False if hasattr(objs, "_meta"): model = objs._meta.model elif hasattr(objs, "model") and hasattr(objs, "_raw_delete"): model = objs.model else: return False if self._has_signal_listeners(model): return False # The use of from_field comes from the need to avoid cascade back to # parent when parent delete is cascading to child. opts = model._meta return ( all( link == from_field for link in opts.concrete_model._meta.parents.values() ) and # Foreign keys pointing to this model. all( related.field.remote_field.on_delete is DO_NOTHING for related in get_candidate_relations_to_delete(opts) ) and ( # Something like generic foreign key. not any( hasattr(field, "bulk_related_objects") for field in opts.private_fields ) ) ) def get_del_batches(self, objs, fields): """ Return the objs in suitably sized batches for the used connection. 
""" field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1 ) if len(objs) > conn_batch_size: return [ objs[i : i + conn_batch_size] for i in range(0, len(objs), conn_batch_size) ] else: return [objs] def collect( self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False, keep_parents=False, fail_on_restricted=True, ): """ Add 'objs' to the collection of objects to be deleted as well as all parent instances. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If 'collect_related' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, 'source' should be the model that caused it and 'nullable' should be set to True, if the relation can be null. If 'reverse_dependency' is True, 'source' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.) If 'keep_parents' is True, data of parent model's will be not deleted. If 'fail_on_restricted' is False, error won't be raised even if it's prohibited to delete such objects due to RESTRICT, that defers restricted object checking in recursive calls where the top-level call may need to collect more objects to determine whether restricted ones can be deleted. """ if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add( objs, source, nullable, reverse_dependency=reverse_dependency ) if not new_objs: return model = new_objs[0].__class__ if not keep_parents: # Recursively collect concrete model's parent models, but not their # related objects. These will be found by meta.get_fields() concrete_model = model._meta.concrete_model for ptr in concrete_model._meta.parents.values(): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect( parent_objs, source=model, source_attr=ptr.remote_field.related_name, collect_related=False, reverse_dependency=True, fail_on_restricted=False, ) if not collect_related: return if keep_parents: parents = set(model._meta.get_parent_list()) model_fast_deletes = defaultdict(list) protected_objects = defaultdict(list) for related in get_candidate_relations_to_delete(model._meta): # Preserve parent reverse relationships if keep_parents=True. if keep_parents and related.model in parents: continue field = related.field on_delete = field.remote_field.on_delete if on_delete == DO_NOTHING: continue related_model = related.related_model if self.can_fast_delete(related_model, from_field=field): model_fast_deletes[related_model].append(field) continue batches = self.get_del_batches(new_objs, [field]) for batch in batches: sub_objs = self.related_objects(related_model, [field], batch) # Non-referenced fields can be deferred if no signal receivers # are connected for the related model as they'll never be # exposed to the user. Skip field deferring when some # relationships are select_related as interactions between both # features are hard to get right. This should only happen in # the rare cases where .related_objects is overridden anyway. 
if not ( sub_objs.query.select_related or self._has_signal_listeners(related_model) ): referenced_fields = set( chain.from_iterable( (rf.attname for rf in rel.field.foreign_related_fields) for rel in get_candidate_relations_to_delete( related_model._meta ) ) ) sub_objs = sub_objs.only(*tuple(referenced_fields)) if getattr(on_delete, "lazy_sub_objs", False) or sub_objs: try: on_delete(self, field, sub_objs, self.using) except ProtectedError as error: key = "'%s.%s'" % (field.model.__name__, field.name) protected_objects[key] += error.protected_objects if protected_objects: raise ProtectedError( "Cannot delete some instances of model %r because they are " "referenced through protected foreign keys: %s." % ( model.__name__, ", ".join(protected_objects), ), set(chain.from_iterable(protected_objects.values())), ) for related_model, related_fields in model_fast_deletes.items(): batches = self.get_del_batches(new_objs, related_fields) for batch in batches: sub_objs = self.related_objects(related_model, related_fields, batch) self.fast_deletes.append(sub_objs) for field in model._meta.private_fields: if hasattr(field, "bulk_related_objects"): # It's something like generic foreign key. sub_objs = field.bulk_related_objects(new_objs, self.using) self.collect( sub_objs, source=model, nullable=True, fail_on_restricted=False ) if fail_on_restricted: # Raise an error if collected restricted objects (RESTRICT) aren't # candidates for deletion also collected via CASCADE. for related_model, instances in self.data.items(): self.clear_restricted_objects_from_set(related_model, instances) for qs in self.fast_deletes: self.clear_restricted_objects_from_queryset(qs.model, qs) if self.restricted_objects.values(): restricted_objects = defaultdict(list) for related_model, fields in self.restricted_objects.items(): for field, objs in fields.items(): if objs: key = "'%s.%s'" % (related_model.__name__, field.name) restricted_objects[key] += objs if restricted_objects: raise RestrictedError( "Cannot delete some instances of model %r because " "they are referenced through restricted foreign keys: " "%s." % ( model.__name__, ", ".join(restricted_objects), ), set(chain.from_iterable(restricted_objects.values())), ) def related_objects(self, related_model, related_fields, objs): """ Get a QuerySet of the related model to objs via related fields. """ predicate = query_utils.Q.create( [(f"{related_field.name}__in", objs) for related_field in related_fields], connector=query_utils.Q.OR, ) return related_model._base_manager.using(self.using).filter(predicate) def instances_with_model(self): for model, instances in self.data.items(): for obj in instances: yield model, obj def sort(self): sorted_models = [] concrete_models = set() models = list(self.data) while len(sorted_models) < len(models): found = False for model in models: if model in sorted_models: continue dependencies = self.dependencies.get(model._meta.concrete_model) if not (dependencies and dependencies.difference(concrete_models)): sorted_models.append(model) concrete_models.add(model._meta.concrete_model) found = True if not found: return self.data = {model: self.data[model] for model in sorted_models} def delete(self): # sort instance collections for model, instances in self.data.items(): self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. 
self.sort() # number of objects deleted for each model label deleted_counter = Counter() # Optimize for the case with a single obj and no dependencies if len(self.data) == 1 and len(instances) == 1: instance = list(instances)[0] if self.can_fast_delete(instance): with transaction.mark_for_rollback_on_error(self.using): count = sql.DeleteQuery(model).delete_batch( [instance.pk], self.using ) setattr(instance, model._meta.pk.attname, None) return count, {model._meta.label: count} with transaction.atomic(using=self.using, savepoint=False): # send pre_delete signals for model, obj in self.instances_with_model(): if not model._meta.auto_created: signals.pre_delete.send( sender=model, instance=obj, using=self.using, origin=self.origin, ) # fast deletes for qs in self.fast_deletes: count = qs._raw_delete(using=self.using) if count: deleted_counter[qs.model._meta.label] += count # update fields for (field, value), instances_list in self.field_updates.items(): updates = [] objs = [] for instances in instances_list: if ( isinstance(instances, models.QuerySet) and instances._result_cache is None ): updates.append(instances) else: objs.extend(instances) if updates: combined_updates = reduce(or_, updates) combined_updates.update(**{field.name: value}) if objs: model = objs[0].__class__ query = sql.UpdateQuery(model) query.update_batch( list({obj.pk for obj in objs}), {field.name: value}, self.using ) # reverse instance collections for instances in self.data.values(): instances.reverse() # delete instances for model, instances in self.data.items(): query = sql.DeleteQuery(model) pk_list = [obj.pk for obj in instances] count = query.delete_batch(pk_list, self.using) if count: deleted_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: signals.post_delete.send( sender=model, instance=obj, using=self.using, origin=self.origin, ) for model, instances in self.data.items(): for instance in instances: setattr(instance, model._meta.pk.attname, None) return sum(deleted_counter.values()), dict(deleted_counter)
castiel248/Convert
Lib/site-packages/django/db/models/deletion.py
Python
mit
21,099
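A sketch of how the on_delete handlers above attach to foreign keys; the models and the sentinel helper are hypothetical. CASCADE feeds sub-objects back into Collector.collect(), PROTECT raises ProtectedError, and SET(...) schedules a field update via add_field_update() instead of a delete:

from django.conf import settings
from django.db import models


def get_sentinel_user():
    # Hypothetical helper returning a placeholder row for SET().
    from django.contrib.auth import get_user_model
    return get_user_model().objects.get_or_create(username="deleted")[0]


class Category(models.Model):
    name = models.CharField(max_length=50)


class Article(models.Model):
    # CASCADE: deleting the author also collects and deletes its articles.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    # PROTECT: deleting a referenced category raises ProtectedError.
    category = models.ForeignKey(Category, on_delete=models.PROTECT)
    # SET with a callable: evaluated lazily when the referenced row is deleted.
    editor = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        related_name="edited_articles",
        on_delete=models.SET(get_sentinel_user),
    )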
import enum
from types import DynamicClassAttribute

from django.utils.functional import Promise

__all__ = ["Choices", "IntegerChoices", "TextChoices"]


class ChoicesMeta(enum.EnumMeta):
    """A metaclass for creating an enum of choices."""

    def __new__(metacls, classname, bases, classdict, **kwds):
        labels = []
        for key in classdict._member_names:
            value = classdict[key]
            if (
                isinstance(value, (list, tuple))
                and len(value) > 1
                and isinstance(value[-1], (Promise, str))
            ):
                *value, label = value
                value = tuple(value)
            else:
                label = key.replace("_", " ").title()
            labels.append(label)
            # Use dict.__setitem__() to suppress defenses against double
            # assignment in enum's classdict.
            dict.__setitem__(classdict, key, value)
        cls = super().__new__(metacls, classname, bases, classdict, **kwds)
        for member, label in zip(cls.__members__.values(), labels):
            member._label_ = label
        return enum.unique(cls)

    def __contains__(cls, member):
        if not isinstance(member, enum.Enum):
            # Allow non-enums to match against member values.
            return any(x.value == member for x in cls)
        return super().__contains__(member)

    @property
    def names(cls):
        empty = ["__empty__"] if hasattr(cls, "__empty__") else []
        return empty + [member.name for member in cls]

    @property
    def choices(cls):
        empty = [(None, cls.__empty__)] if hasattr(cls, "__empty__") else []
        return empty + [(member.value, member.label) for member in cls]

    @property
    def labels(cls):
        return [label for _, label in cls.choices]

    @property
    def values(cls):
        return [value for value, _ in cls.choices]


class Choices(enum.Enum, metaclass=ChoicesMeta):
    """Class for creating enumerated choices."""

    @DynamicClassAttribute
    def label(self):
        return self._label_

    @property
    def do_not_call_in_templates(self):
        return True

    def __str__(self):
        """
        Use value when cast to str, so that Choices set as model instance
        attributes are rendered as expected in templates and similar
        contexts.
        """
        return str(self.value)

    # A similar format was proposed for Python 3.10.
    def __repr__(self):
        return f"{self.__class__.__qualname__}.{self._name_}"


class IntegerChoices(int, Choices):
    """Class for creating enumerated integer choices."""

    pass


class TextChoices(str, Choices):
    """Class for creating enumerated string choices."""

    def _generate_next_value_(name, start, count, last_values):
        return name
castiel248/Convert
Lib/site-packages/django/db/models/enums.py
Python
mit
2,804
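A usage sketch for the Choices machinery above, assuming a configured Django project; the Suit enum is hypothetical. ChoicesMeta builds the .choices/.labels/.values properties and lets raw values match with the `in` operator:

from django.db import models


class Suit(models.IntegerChoices):
    DIAMOND = 1, "Diamond"
    SPADE = 2, "Spade"
    HEART = 3, "Heart"
    CLUB = 4, "Club"


assert Suit.choices == [(1, "Diamond"), (2, "Spade"), (3, "Heart"), (4, "Club")]
assert Suit.labels == ["Diamond", "Spade", "Heart", "Club"]
assert Suit.DIAMOND.label == "Diamond"
assert 1 in Suit               # ChoicesMeta.__contains__ matches member values.
assert str(Suit.SPADE) == "2"  # Choices.__str__ casts to the raw value.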
import copy import datetime import functools import inspect import warnings from collections import defaultdict from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet from django.db import DatabaseError, NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == "DecimalField": sql = "CAST(%s AS NUMERIC)" % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = "+" SUB = "-" MUL = "*" DIV = "/" POW = "^" # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = "%%" # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = "&" BITOR = "|" BITLEFTSHIFT = "<<" BITRIGHTSHIFT = ">>" BITXOR = "#" def _combine(self, other, connector, reversed): if not hasattr(other, "resolve_expression"): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def __xor__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) ^ Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, "conditional", False) and getattr(other, "conditional", False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." 
) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __rxor__(self, other): raise NotImplementedError( "Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations." ) def __invert__(self): return NegatedExpression(self) class BaseExpression: """Base class for all query expressions.""" empty_result_set_value = NotImplemented # aggregate specific fields is_summary = False _output_field_resolved_to_none = False # Can the expression be used in a WHERE clause? filterable = True # Can the expression can be used as a source expression in Window? window_compatible = False def __init__(self, output_field=None): if output_field is not None: self.output_field = output_field def __getstate__(self): state = self.__dict__.copy() state.pop("convert_value", None) return state def get_db_converters(self, connection): return ( [] if self.convert_value is self._convert_value_noop else [self.convert_value] ) + self.output_field.get_db_converters(connection) def get_source_expressions(self): return [] def set_source_expressions(self, exprs): assert not exprs def _parse_expressions(self, *expressions): return [ arg if hasattr(arg, "resolve_expression") else (F(arg) if isinstance(arg, str) else Value(arg)) for arg in expressions ] def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()") @cached_property def contains_aggregate(self): return any( expr and expr.contains_aggregate for expr in self.get_source_expressions() ) @cached_property def contains_over_clause(self): return any( expr and expr.contains_over_clause for expr in self.get_source_expressions() ) @cached_property def contains_column_references(self): return any( expr and expr.contains_column_references for expr in self.get_source_expressions() ) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): """ Provide the chance to do any preprocessing or validation before being added to the query. 
Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query. """ c = self.copy() c.is_summary = summarize c.set_source_expressions( [ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ] ) return c @property def conditional(self): return isinstance(self.output_field, fields.BooleanField) @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError("Cannot resolve expression type, unknown output_field") return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. As a guess, if the output fields of all source fields match then simply infer the same type here. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ # This guess is mostly a bad idea, but there is quite a lot of code # (especially 3rd party Func subclasses) that depend on it, we'd need a # deprecation path to fix it. sources_iter = ( source for source in self.get_source_fields() if source is not None ) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( "Expression contains mixed types: %s, %s. You must " "set output_field." % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == "FloatField": return ( lambda value, expression, connection: None if value is None else float(value) ) elif internal_type.endswith("IntegerField"): return ( lambda value, expression, connection: None if value is None else int(value) ) elif internal_type == "DecimalField": return ( lambda value, expression, connection: None if value is None else Decimal(value) ) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions( [ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ] ) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.copy() source_expressions = clone.get_source_expressions() clone.set_source_expressions( [ expr.replace_expressions(replacements) if expr else None for expr in source_expressions ] ) return clone def get_refs(self): refs = set() for expr in self.get_source_expressions(): refs |= expr.get_refs() return refs def copy(self): return copy.copy(self) def prefix_references(self, prefix): clone = self.copy() clone.set_source_expressions( [ F(f"{prefix}{expr.name}") if isinstance(expr, F) else expr.prefix_references(prefix) for expr in self.get_source_expressions() ] ) return clone def get_group_by_cols(self): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, "flatten"): yield from expr.flatten() else: yield expr def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, "select_format"): return self.output_field.select_format(compiler, sql, params) return sql, params @deconstructible class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, Expression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) # Type inference for CombinedExpression.output_field. # Missing items will result in FieldError, by design. 
# # The current approach for NULL is based on lowest common denominator behavior # i.e. if one of the supported databases is raising an error (rather than # return NULL) for `val <op> NULL`, then Django raises FieldError. NoneType = type(None) _connector_combinations = [ # Numeric operations - operands of same type. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), (fields.FloatField, fields.FloatField, fields.FloatField), (fields.DecimalField, fields.DecimalField, fields.DecimalField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, # Behavior for DIV with integer arguments follows Postgres/SQLite, # not MySQL/Oracle. Combinable.DIV, Combinable.MOD, Combinable.POW, ) }, # Numeric operations - operands of different type. { connector: [ (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, ) }, # Bitwise operators. { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), ] for connector in ( Combinable.BITAND, Combinable.BITOR, Combinable.BITLEFTSHIFT, Combinable.BITRIGHTSHIFT, Combinable.BITXOR, ) }, # Numeric with NULL. { connector: [ (field_type, NoneType, field_type), (NoneType, field_type, field_type), ] for connector in ( Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV, Combinable.MOD, Combinable.POW, ) for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField) }, # Date/DateTimeField/DurationField/TimeField. { Combinable.ADD: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DurationField, fields.DateField, fields.DateTimeField), (fields.DurationField, fields.DateTimeField, fields.DateTimeField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.DurationField, fields.TimeField, fields.TimeField), ], }, { Combinable.SUB: [ # Date/DateTimeField. (fields.DateField, fields.DurationField, fields.DateTimeField), (fields.DateTimeField, fields.DurationField, fields.DateTimeField), (fields.DateField, fields.DateField, fields.DurationField), (fields.DateField, fields.DateTimeField, fields.DurationField), (fields.DateTimeField, fields.DateField, fields.DurationField), (fields.DateTimeField, fields.DateTimeField, fields.DurationField), # DurationField. (fields.DurationField, fields.DurationField, fields.DurationField), # TimeField. (fields.TimeField, fields.DurationField, fields.TimeField), (fields.TimeField, fields.TimeField, fields.DurationField), ], }, ] _connector_combinators = defaultdict(list) def register_combinable_fields(lhs, connector, rhs, result): """ Register combinable types: lhs <connector> rhs -> result e.g. 
register_combinable_fields( IntegerField, Combinable.ADD, FloatField, FloatField ) """ _connector_combinators[connector].append((lhs, rhs, result)) for d in _connector_combinations: for connector, field_types in d.items(): for lhs, rhs, result in field_types: register_combinable_fields(lhs, connector, rhs, result) @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass( rhs_type, combinator_rhs_type ): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): # We avoid using super() here for reasons given in # Expression._resolve_output_field() combined_type = _resolve_combined_type( self.connector, type(self.lhs._output_field_or_none), type(self.rhs._output_field_or_none), ) if combined_type is None: raise FieldError( f"Cannot infer type of {self.connector!r} expression involving these " f"types: {self.lhs.output_field.__class__.__name__}, " f"{self.rhs.output_field.__class__.__name__}. You must set " f"output_field." ) return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression( self.lhs, self.connector, self.rhs ).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {"DateField", "DateTimeField", "TimeField"} if ( self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type ): return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == "DurationField": sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return 
compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = "(%s)" sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) if self.connector in {Combinable.MUL, Combinable.DIV}: try: lhs_type = self.lhs.output_field.get_internal_type() rhs_type = self.rhs.output_field.get_internal_type() except (AttributeError, FieldError): pass else: allowed_fields = { "DecimalField", "DurationField", "FloatField", "IntegerField", } if lhs_type not in allowed_fields or rhs_type not in allowed_fields: raise DatabaseError( f"Invalid arguments for operator {self.connector}." ) return sql, params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals( self.lhs.output_field.get_internal_type(), lhs, rhs ) @deconstructible(path="django.db.models.F") class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def replace_expressions(self, replacements): return replacements.get(self, self) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) def copy(self): return copy.copy(self) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False contains_over_clause = False def as_sql(self, *args, **kwargs): raise ValueError( "This queryset contains a reference to an outer query and may " "only be used in a subquery." ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) if col.contains_over_clause: raise NotSupportedError( f"Referencing outer query window expression is not supported: " f"{self.name}." ) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. 
col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self @deconstructible(path="django.db.models.Func") class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = "%(function)s(%(expressions)s)" arg_joiner = ", " arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ", ".join( str(key) + "=" + str(val) for key, val in sorted(extra.items()) ) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql( self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context, ): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: try: arg_sql, arg_params = compiler.compile(arg) except EmptyResultSet: empty_result_set_value = getattr( arg, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: raise arg_sql, arg_params = compiler.compile(Value(empty_result_set_value)) except FullResultSet: arg_sql, arg_params = compiler.compile(Value(True)) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. 
if function is not None: data["function"] = function else: data.setdefault("function", self.function) template = template or data.get("template", self.template) arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner) data["expressions"] = data["field"] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy @deconstructible(path="django.db.models.Value") class Value(SQLiteNumericMixin, Expression): """Represent a wrapped value as a node within an expression.""" # Provide a default value for `for_save` in order to allow unresolved # instances to be compiled until a decision is taken in #25425. for_save = False def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value def __repr__(self): return f"{self.__class__.__name__}({self.value!r})" def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, "get_placeholder"): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return "NULL", [] return "%s", [val] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self): return [] def _resolve_output_field(self): if isinstance(self.value, str): return fields.CharField() if isinstance(self.value, bool): return fields.BooleanField() if isinstance(self.value, int): return fields.IntegerField() if isinstance(self.value, float): return fields.FloatField() if isinstance(self.value, datetime.datetime): return fields.DateTimeField() if isinstance(self.value, datetime.date): return fields.DateField() if isinstance(self.value, datetime.time): return fields.TimeField() if isinstance(self.value, datetime.timedelta): return fields.DurationField() if isinstance(self.value, Decimal): return fields.DecimalField() if isinstance(self.value, bytes): return fields.BinaryField() if isinstance(self.value, UUID): return fields.UUIDField() @property def empty_result_set_value(self): return self.value class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return "(%s)" % self.sql, self.params def get_group_by_cols(self): return [self] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # Resolve parents fields used in raw SQL. 
if query.model: for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref( parent_field.name, allow_joins, reuse, summarize ) break return super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return "*", [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return "{}({})".format(self.__class__.__name__, ", ".join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = ".".join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__( relabels.get(self.alias, self.alias), self.target, self.output_field ) def get_group_by_cols(self): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return self.output_field.get_db_converters( connection ) + self.target.get_db_converters(connection) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): (self.source,) = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def get_refs(self): return {self.refs} def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like a partition clause. """ template = "%(expressions)s" def __init__(self, *expressions, **extra): if not expressions: raise ValueError( "%s requires at least one expression." % self.__class__.__name__ ) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. 
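        # Skip SQLiteNumericMixin.as_sqlite(), which would otherwise wrap a
        # DecimalField expression in CAST(... AS NUMERIC); a bare list of
        # expressions (e.g. a partition clause) never needs that cast.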
return self.as_sql(compiler, connection, **extra_context) class OrderByList(Func): template = "ORDER BY %(expressions)s" def __init__(self, *expressions, **extra): expressions = ( ( OrderBy(F(expr[1:]), descending=True) if isinstance(expr, str) and expr[0] == "-" else expr ) for expr in expressions ) super().__init__(*expressions, **extra) def as_sql(self, *args, **kwargs): if not self.source_expressions: return "", () return super().as_sql(*args, **kwargs) def get_group_by_cols(self): group_by_cols = [] for order_by in self.get_source_expressions(): group_by_cols.extend(order_by.get_group_by_cols()) return group_by_cols @deconstructible(path="django.db.models.ExpressionWrapper") class ExpressionWrapper(SQLiteNumericMixin, Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. """ def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self): if isinstance(self.expression, Expression): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols() # For non-expressions e.g. an SQL WHERE clause, the entire # `expression` must be included in the GROUP BY clause. return super().get_group_by_cols() def as_sql(self, compiler, connection): return compiler.compile(self.expression) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class NegatedExpression(ExpressionWrapper): """The logical negation of a conditional expression.""" def __init__(self, expression): super().__init__(expression, output_field=fields.BooleanField()) def __invert__(self): return self.expression.copy() def as_sql(self, compiler, connection): try: sql, params = super().as_sql(compiler, connection) except EmptyResultSet: features = compiler.connection.features if not features.supports_boolean_expr_in_select_clause: return "1=1", () return compiler.compile(Value(True)) ops = compiler.connection.ops # Some database backends (e.g. Oracle) don't allow EXISTS() and filters # to be compared to another expression unless they're wrapped in a CASE # WHEN. if not ops.conditional_expression_supported_in_where_clause(self.expression): return f"CASE WHEN {sql} = 0 THEN 1 ELSE 0 END", params return f"NOT {sql}", params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): resolved = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) if not getattr(resolved.expression, "conditional", False): raise TypeError("Cannot negate non-conditional expressions.") return resolved def select_format(self, compiler, sql, params): # Wrap boolean expressions with a CASE WHEN expression if a database # backend (e.g. Oracle) doesn't support boolean expression in SELECT or # GROUP BY list. expression_supported_in_where_clause = ( compiler.connection.ops.conditional_expression_supported_in_where_clause ) if ( not compiler.connection.features.supports_boolean_expr_in_select_clause # Avoid double wrapping. 
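            # (as_sql() already emits CASE WHEN ... = 0 THEN 1 ELSE 0 END for
            # expressions that aren't supported in a WHERE clause.)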
and expression_supported_in_where_clause(self.expression) ): sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.When") class When(Expression): template = "WHEN %(condition)s THEN %(result)s" # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, "conditional", False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, "conditional", False) or lookups: raise TypeError( "When() supports a Q object, a boolean expression, or lookups " "as a condition." ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize if hasattr(c.condition, "resolve_expression"): c.condition = c.condition.resolve_expression( query, allow_joins, reuse, summarize, False ) c.result = c.result.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params["condition"] = condition_sql result_sql, result_params = compiler.compile(self.result) template_params["result"] = result_sql template = template or self.template return template % template_params, ( *sql_params, *condition_params, *result_params, ) def get_group_by_cols(self): # This is not a complete expression and cannot be used in GROUP BY. 
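        # Instead, contribute the columns of the condition and result
        # expressions, e.g. (illustrative) for When(points__gt=0,
        # then=F("bonus")) both the "points" and "bonus" columns take part in
        # the GROUP BY of the enclosing Case().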
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols @deconstructible(path="django.db.models.Case") class Case(SQLiteNumericMixin, Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = "CASE %(cases)s ELSE %(default)s END" case_joiner = " " def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % ( ", ".join(str(c) for c in self.cases), self.default, ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression( query, allow_joins, reuse, summarize, for_save ) c.default = c.default.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql( self, compiler, connection, template=None, case_joiner=None, **extra_context ): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] default_sql, default_params = compiler.compile(self.default) for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue except FullResultSet: default_sql, default_params = compiler.compile(case.result) break case_parts.append(case_sql) sql_params.extend(case_params) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params["cases"] = case_joiner.join(case_parts) template_params["default"] = default_sql sql_params.extend(default_params) template = template or template_params.get("template", self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params def get_group_by_cols(self): if not self.cases: return self.default.get_group_by_cols() return super().get_group_by_cols() class Subquery(BaseExpression, Combinable): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = "(%(subquery)s)" contains_aggregate = False empty_result_set_value = None subquery = True def __init__(self, queryset, output_field=None, **extra): # Allow the usage of both QuerySet and sql.Query objects. 
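        # Typical usage (illustrative model names), correlating on the outer
        # row with OuterRef() and selecting a single column of the first row:
        #
        #     newest = Comment.objects.filter(post=OuterRef("pk")).order_by("-created")
        #     Post.objects.annotate(newest_email=Subquery(newest.values("email")[:1]))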
self.query = getattr(queryset, "query", queryset).clone() self.query.subquery = True self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def get_external_cols(self): return self.query.get_external_cols() def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params["subquery"] = subquery_sql[1:-1] template = template or template_params.get("template", self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self): return self.query.get_group_by_cols(wrapper=self) class Exists(Subquery): template = "EXISTS(%(subquery)s)" output_field = fields.BooleanField() empty_result_set_value = False def __init__(self, queryset, **kwargs): super().__init__(queryset, **kwargs) self.query = self.query.exists() def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql) return sql, params @deconstructible(path="django.db.models.OrderBy") class OrderBy(Expression): template = "%(expression)s %(ordering)s" conditional = False def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None): if nulls_first and nulls_last: raise ValueError("nulls_first and nulls_last are mutually exclusive") if nulls_first is False or nulls_last is False: # When the deprecation ends, replace with: # raise ValueError( # "nulls_first and nulls_last values must be True or None." 
# ) warnings.warn( "Passing nulls_first=False or nulls_last=False is deprecated, use None " "instead.", RemovedInDjango50Warning, stacklevel=2, ) self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, "resolve_expression"): raise ValueError("expression must be an expression type") self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending ) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = "%s NULLS LAST" % template elif self.nulls_first: template = "%s NULLS FIRST" % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NULL, %s" % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = "%%(expression)s IS NOT NULL, %s" % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { "expression": expression_sql, "ordering": "DESC" if self.descending else "ASC", **extra_context, } params *= template.count("%(expression)s") return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped # in a CASE WHEN. if connection.ops.conditional_expression_supported_in_where_clause( self.expression ): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first: self.nulls_last = True self.nulls_first = None elif self.nulls_last: self.nulls_first = True self.nulls_last = None return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(SQLiteNumericMixin, Expression): template = "%(expression)s OVER (%(window)s)" # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True def __init__( self, expression, partition_by=None, order_by=None, frame=None, output_field=None, ): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, "window_compatible", False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." % expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = OrderByList(*self.order_by) elif isinstance(self.order_by, (BaseExpression, str)): self.order_by = OrderByList(self.order_by) else: raise ValueError( "Window.order_by must be either a string reference to a " "field, an expression, or a list or tuple of them." 
) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError("This backend does not support window expressions.") expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], () if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template="PARTITION BY %(expressions)s", ) window_sql.append(sql_expr) window_params += tuple(sql_params) if self.order_by is not None: order_sql, order_params = compiler.compile(self.order_by) window_sql.append(order_sql) window_params += tuple(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(frame_sql) window_params += tuple(frame_params) template = template or self.template return ( template % {"expression": expr_sql, "window": " ".join(window_sql).strip()}, (*params, *window_params), ) def as_sqlite(self, compiler, connection): if isinstance(self.output_field, fields.DecimalField): # Casting to numeric must be outside of the window expression. copy = self.copy() source_expressions = copy.get_source_expressions() source_expressions[0].output_field = fields.FloatField() copy.set_source_expressions(source_expressions) return super(Window, copy).as_sqlite(compiler, connection) return self.as_sql(compiler, connection) def __str__(self): return "{} OVER ({}{}{})".format( str(self.source_expression), "PARTITION BY " + str(self.partition_by) if self.partition_by else "", str(self.order_by or ""), str(self.frame or ""), ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): group_by_cols = [] if self.partition_by: group_by_cols.extend(self.partition_by.get_group_by_cols()) if self.order_by is not None: group_by_cols.extend(self.order_by.get_group_by_cols()) return group_by_cols class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
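
    For example (illustrative), RowRange(start=-1, end=1) renders as
    "ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING" (a sliding three-row window),
    while ValueRange(start=None, end=0) renders as
    "RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW".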
""" template = "%(frame_type)s BETWEEN %(start)s AND %(end)s" def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end( connection, self.start.value, self.end.value ) return ( self.template % { "frame_type": self.frame_type, "start": start, "end": end, }, [], ) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_group_by_cols(self): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = "%d %s" % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { "frame_type": self.frame_type, "start": start, "end": end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError("Subclasses must implement window_frame_start_end().") class RowRange(WindowFrame): frame_type = "ROWS" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = "RANGE" def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
castiel248/Convert
Lib/site-packages/django/db/models/expressions.py
Python
mit
64809
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField", "BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField", "DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "Field", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField", "IntegerField", "NOT_PROVIDED", "NullBooleanField", "PositiveBigIntegerField", "PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallAutoField", "SmallIntegerField", "TextField", "TimeField", "URLField", "UUIDField", ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { "invalid_choice": _("Value %(value)r is not a valid choice."), "null": _("This field cannot be null."), "blank": _("This field cannot be blank."), "unique": _("%(model_name)s with this %(field_label)s already exists."), "unique_for_date": _( # Translators: The 'lookup_type' is one of 'date', 'year' or # 'month'. 
Eg: "Title must be unique for pub_date year" "%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s." ), } system_check_deprecated_details = None system_check_removed_details = None # Attributes that don't affect a column definition. # These attributes are ignored when altering the field. non_db_attrs = ( "blank", "choices", "db_column", "editable", "error_messages", "help_text", "limit_choices_to", # Database-level options are not supported, see #21961. "on_delete", "related_name", "related_query_name", "validators", "verbose_name", ) # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _("Field of type: %(field_type)s") % { "field_type": self.__class__.__name__ } description = property(_description) def __init__( self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text="", db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None, db_comment=None, ): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self.db_comment = db_comment self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later self._error_messages = error_messages # Store for deconstruction later def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, "model"): return super().__str__() model = self.model return "%s.%s" % (model._meta.label, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, "name", None) if name is not None: return "<%s: %s>" % (path, name) return "<%s>" % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_db_comment(**kwargs), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". 
""" if self.name.endswith("_"): return [ checks.Error( "Field names must not end with an underscore.", obj=self, id="fields.E001", ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' % LOOKUP_SEP, obj=self, id="fields.E002", ) ] elif self.name == "pk": return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id="fields.E003", ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id="fields.E004", ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max( [ choice_max_length, *( len(value) for value, _ in group_choices if isinstance(value, str) ), ] ) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value( human_name ): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id="fields.E009", ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id="fields.E005", ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id="fields.E006", ) ] else: return [] def _check_db_comment(self, databases=None, **kwargs): if not self.db_comment or not databases: return [] errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( connection.features.supports_comments or "supports_comments" in self.model._meta.required_db_features ): errors.append( checks.Warning( f"{connection.display_name} does not support comments on " f"columns (db_comment).", obj=self, id="fields.W163", ) ) return errors def _check_null_allowed_for_primary_keys(self): if ( self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls ): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( "Primary keys must not have null=True.", hint=( "Set null=False on the field, or " "remove primary_key=True argument." 
), obj=self, id="fields.E007", ) ] else: return [] def _check_backend_specific_checks(self, databases=None, **kwargs): if databases is None: return [] errors = [] for alias in databases: if router.allow_migrate_model(alias, self.model): errors.extend(connections[alias].validation.check_field(self, **kwargs)) return errors def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id="fields.E008", ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( "msg", "%s has been removed except for support in historical " "migrations." % self.__class__.__name__, ), hint=self.system_check_removed_details.get("hint"), obj=self, id=self.system_check_removed_details.get("id", "fields.EXXX"), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( "msg", "%s has been deprecated." % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get("hint"), obj=self, id=self.system_check_deprecated_details.get("id", "fields.WXXX"), ) ] return [] def get_col(self, alias, output_field=None): if alias == self.model._meta.db_table and ( output_field is None or output_field == self ): return self.cached_col from django.db.models.expressions import Col return Col(alias, self, output_field) @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class, e.g. django.db.models.IntegerField. This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
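
        For example, CharField(max_length=100).deconstruct() returns roughly
        (name, "django.db.models.CharField", [], {"max_length": 100}) once
        the field has been named by contribute_to_class().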
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": "", "db_column": None, "db_comment": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith("django.db.models.fields.json"): path = path.replace("django.db.models.fields.json", "django.db.models") elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return self.creation_counter == other.creation_counter and getattr( self, "model", None ) == getattr(other, "model", None) return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. # Order by creation_counter first for backward compatibility. if isinstance(other, Field): if ( self.creation_counter != other.creation_counter or not hasattr(self, "model") and not hasattr(other, "model") ): return self.creation_counter < other.creation_counter elif hasattr(self, "model") != hasattr(other, "model"): return not hasattr(self, "model") # Order no-model fields first else: # creation_counter's are equal, compare only models. return (self.model._meta.app_label, self.model._meta.model_name) < ( other.model._meta.app_label, other.model._meta.model_name, ) return NotImplemented def __hash__(self): return hash(self.creation_counter) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. 
obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, "field") and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, "model"): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop("_get_default", None) return _empty, (self.__class__,), state return _load_field, ( self.model._meta.app_label, self.model._meta.object_name, self.name, ) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def error_messages(self): messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, "default_error_messages", {})) messages.update(self._error_messages or {}) return messages @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, "code") and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. 
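                    # e.g. (illustrative) choices = [
                    #     ("Audio", [("vinyl", "Vinyl"), ("cd", "CD")]),
                    # ] makes option_value the [("vinyl", "Vinyl"), ...] list.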
for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages["null"], code="null") if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages["blank"], code="blank") def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. """ value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return ( connection.data_type_check_constraints[self.get_internal_type()] % data ) except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. """ # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: column_type = connection.data_types[self.get_internal_type()] except KeyError: return None else: # column_type is either a single-parameter function or a string. if callable(column_type): return column_type(data) return column_type % data def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. 
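
        For example, a PositiveIntegerField on a backend that enforces it
        with a check constraint (e.g. PostgreSQL) may return something like
        {"type": "integer", "check": '"price" >= 0'} for an illustrative
        column named "price".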
""" type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, "from_db_value"): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace("_", " ") def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: # Don't override a get_FOO_display() method defined explicitly on # this class, but don't check methods derived from inheritance, to # allow overriding inherited choices. For more complex inheritance # structures users should override contribute_to_class(). if "get_%s_display" % self.name not in cls.__dict__: setattr( cls, "get_%s_display" % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). 
""" if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" if hasattr(value, "as_sql"): return value return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if ( not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls ): return return_None return str # return empty string def get_choices( self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=(), ): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any( choice in ("", None) for choice, _ in self.flatchoices ) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, "get_related_field") else "pk" ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. """ return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { "required": not self.blank, "label": capfirst(self.verbose_name), "help_text": self.help_text, } if self.has_default(): if callable(self.default): defaults["initial"] = self.default defaults["show_hidden_initial"] = True else: defaults["initial"] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = self.blank or not ( self.has_default() or "initial" in kwargs ) defaults["choices"] = self.get_choices(include_blank=include_blank) defaults["coerce"] = self.to_python if self.null: defaults["empty_value"] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. 
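            # (For instance, the max_length that CharField.formfield() adds
            # is dropped here, since it isn't in the allowlist below.)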
for k in list(kwargs): if k not in ( "coerce", "empty_value", "choices", "required", "widget", "label", "initial", "help_text", "error_messages", "show_hidden_initial", "disabled", ): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be either True or False."), "invalid_nullable": _("“%(value)s” value must be either True, False, or None."), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ("t", "True", "1"): return True if value in ("f", "False", "0"): return False raise exceptions.ValidationError( self.error_messages["invalid_nullable" if self.null else "invalid"], code="invalid", params={"value": value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or "initial" in kwargs) defaults = {"choices": self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. defaults = {"form_class": form_class, "required": False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) @property def description(self): if self.max_length is not None: return _("String (up to %(max_length)s)") else: return _("String (unlimited)") def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: if ( connection.features.supports_unlimited_charfield or "supports_unlimited_charfield" in self.model._meta.required_db_features ): return [] return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id="fields.E120", ) ] elif ( not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0 ): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id="fields.E121", ) ] else: return [] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_charfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_charfield ): errors.append( checks.Error( "%s does not support a database collation on " "CharFields." 
% connection.display_name, obj=self, id="fields.E190", ), ) return errors def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {"max_length": self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults["empty_value"] = None defaults.update(kwargs) return super().formfield(**defaults) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { "msg": ( "CommaSeparatedIntegerField is removed except for support in " "historical migrations." ), "hint": ( "Use CharField(validators=[validate_comma_separated_integer_list]) " "instead." ), "id": "fields.E901", } def _to_naive(value): if timezone.is_aware(value): value = timezone.make_naive(value, datetime.timezone.utc) return value def _get_naive_now(): return _to_naive(timezone.now()) class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [ self.auto_now_add, self.auto_now, self.has_default(), ] enabled_options = [ option not in (None, False) for option in mutually_exclusive_options ].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id="fields.E160", ) ] else: return [] def _check_fix_default_value(self): return [] # Concrete subclasses use this in their implementations of # _check_fix_default_value(). def _check_if_value_fixed(self, value, now=None): """ Check if the given value appears to have been provided as a "fixed" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object. 
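
        A value within 10 seconds of now is treated as "fixed": typically the
        result of writing default=timezone.now() (evaluated once at class
        definition time) instead of passing the callable default=timezone.now.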
""" if now is None: now = _get_naive_now() offset = datetime.timedelta(seconds=10) lower = now - offset upper = now + offset if isinstance(value, datetime.datetime): value = _to_naive(value) else: assert isinstance(value, datetime.date) lower = lower.date() upper = upper.date() if lower <= value <= upper: return [ checks.Warning( "Fixed default value provided.", hint=( "It seems you set a fixed date / time / datetime " "value as default for this field. This may not be " "what you want. If you want to have the current date " "as default, use `django.utils.timezone.now`" ), obj=self, id="fields.W161", ) ] return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid date format. It must be " "in YYYY-MM-DD format." ), "invalid_date": _( "“%(value)s” value has the correct format (YYYY-MM-DD) " "but it is an invalid date." ), } description = _("Date (without time)") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): value = _to_naive(value).date() elif isinstance(value, datetime.date): pass else: # No explicit date / datetime value -- no checks necessary return [] # At this point, value is a date object. return self._check_if_value_fixed(value) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs["auto_now"] = True if self.auto_now_add: kwargs["auto_now_add"] = True if self.auto_now or self.auto_now_add: del kwargs["editable"] del kwargs["blank"] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, "get_next_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=True ), ) setattr( cls, "get_previous_by_%s" % self.name, partialmethod( cls._get_next_or_previous_by_FIELD, field=self, is_next=False ), ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateField, **kwargs, } ) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format." ), "invalid_date": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date." ), "invalid_datetime": _( "“%(value)s” value has the correct format " "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) " "but it is an invalid date/time." ), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, (datetime.datetime, datetime.date)): return self._check_if_value_fixed(value) # No explicit date / datetime value -- no checks necessary. return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn( "DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." 
% (self.model.__name__, self.name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_datetime"], code="invalid_datetime", params={"value": value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_date"], code="invalid_date", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = "%s.%s" % (self.model.__name__, self.name) except AttributeError: name = "(unbound)" warnings.warn( "DateTimeField %s received a naive datetime (%s)" " while time zone support is active." % (name, value), RuntimeWarning, ) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DateTimeField, **kwargs, } ) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be a decimal number."), } description = _("Decimal number") def __init__( self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs, ): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id="fields.E130", ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id="fields.E131", ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, 
id="fields.E132", ) ] except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id="fields.E133", ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id="fields.E134", ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs["max_digits"] = self.max_digits if self.decimal_places is not None: kwargs["decimal_places"] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value try: if isinstance(value, float): decimal_value = self.context.create_decimal_from_float(value) else: decimal_value = decimal.Decimal(value) except (decimal.InvalidOperation, TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) if not decimal_value.is_finite(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return decimal_value def get_db_prep_save(self, value, connection): if hasattr(value, "as_sql"): return value return connection.ops.adapt_decimalfield_value( self.to_python(value), self.max_digits, self.decimal_places ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield( **{ "max_digits": self.max_digits, "decimal_places": self.decimal_places, "form_class": forms.DecimalField, **kwargs, } ) class DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "[DD] [[HH:]MM:]ss[.uuuuuu] format." 
) } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.DurationField, **kwargs, } ) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault("max_length", 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. return super().formfield( **{ "form_class": forms.EmailField, **kwargs, } ) class FilePathField(Field): description = _("File path") def __init__( self, verbose_name=None, name=None, path="", match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs, ): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' " "set to True.", obj=self, id="fields.E140", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != "": kwargs["path"] = self.path if self.match is not None: kwargs["match"] = self.match if self.recursive is not False: kwargs["recursive"] = self.recursive if self.allow_files is not True: kwargs["allow_files"] = self.allow_files if self.allow_folders is not False: kwargs["allow_folders"] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield( **{ "path": self.path() if callable(self.path) else self.path, "match": self.match, "recursive": self.recursive, "form_class": forms.FilePathField, "allow_files": self.allow_files, "allow_folders": self.allow_folders, **kwargs, } ) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { "invalid": 
_("“%(value)s” value must be a float."), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FloatField, **kwargs, } ) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { "invalid": _("“%(value)s” value must be an integer."), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id="fields.W122", ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." 
% (self.name, value), ) from e def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) return connection.ops.adapt_integerfield_value(value, self.get_internal_type()) def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.IntegerField, **kwargs, } ) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": -BigIntegerField.MAX_BIGINT - 1, "max_value": BigIntegerField.MAX_BIGINT, **kwargs, } ) class SmallIntegerField(IntegerField): description = _("Small integer") def get_internal_type(self): return "SmallIntegerField" class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { "msg": ( "IPAddressField has been removed except for support in " "historical migrations." ), "hint": "Use GenericIPAddressField instead.", "id": "fields.E900", } def __init__(self, *args, **kwargs): kwargs["max_length"] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__( self, verbose_name=None, name=None, protocol="both", unpack_ipv4=False, *args, **kwargs, ): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol ( self.default_validators, invalid_error_message, ) = validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages["invalid"] = invalid_error_message kwargs["max_length"] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, "null", False) and getattr(self, "blank", False): return [ checks.Error( "GenericIPAddressFields cannot have blank=True if null=False, " "as blank values are stored as nulls.", obj=self, id="fields.E150", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs["unpack_ipv4"] = self.unpack_ipv4 if self.protocol != "both": kwargs["protocol"] = self.protocol if kwargs.get("max_length") == 39: del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ":" in value: return clean_ipv6_address( value, self.unpack_ipv4, self.error_messages["invalid"] ) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if 
value is None: return None if value and ":" in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield( **{ "protocol": self.protocol, "form_class": forms.GenericIPAddressField, **kwargs, } ) class NullBooleanField(BooleanField): default_error_messages = { "invalid": _("“%(value)s” value must be either None, True or False."), "invalid_nullable": _("“%(value)s” value must be either None, True or False."), } description = _("Boolean (Either True, False or None)") system_check_removed_details = { "msg": ( "NullBooleanField is removed except for support in historical " "migrations." ), "hint": "Use BooleanField(null=True) instead.", "id": "fields.E903", } def __init__(self, *args, **kwargs): kwargs["null"] = True kwargs["blank"] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["null"] del kwargs["blank"] return name, path, args, kwargs class PositiveIntegerRelDbTypeMixin: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if not hasattr(cls, "integer_field_class"): cls.integer_field_class = next( ( parent for parent in cls.__mro__[1:] if issubclass(parent, IntegerField) ), None, ) def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return self.integer_field_class().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, BigIntegerField): description = _("Positive big integer") def get_internal_type(self): return "PositiveBigIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, SmallIntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield( **{ "min_value": 0, **kwargs, } ) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__( self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs ): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs["max_length"] if self.db_index is False: kwargs["db_index"] = False else: del kwargs["db_index"] if self.allow_unicode is not False: kwargs["allow_unicode"] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.SlugField, "allow_unicode": 
self.allow_unicode, **kwargs, } ) class TextField(Field): description = _("Text") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation def check(self, **kwargs): databases = kwargs.get("databases") or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), ] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or "supports_collation_on_textfield" in self.model._meta.required_db_features or connection.features.supports_collation_on_textfield ): errors.append( checks.Error( "%s does not support a database collation on " "TextFields." % connection.display_name, obj=self, id="fields.E190", ), ) return errors def db_parameters(self, connection): db_params = super().db_parameters(connection) db_params["collation"] = self.db_collation return db_params def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield( **{ "max_length": self.max_length, **({} if self.choices is not None else {"widget": forms.Textarea}), **kwargs, } ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs["db_collation"] = self.db_collation return name, path, args, kwargs class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { "invalid": _( "“%(value)s” value has an invalid format. It must be in " "HH:MM[:ss[.uuuuuu]] format." ), "invalid_time": _( "“%(value)s” value has the correct format " "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time." ), } description = _("Time") def __init__( self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs ): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs["editable"] = False kwargs["blank"] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): now = None elif isinstance(value, datetime.time): now = _get_naive_now() # This will not use the right date in the race condition where now # is just before the date change and value is just past 0:00. value = datetime.datetime.combine(now.date(), value) else: # No explicit time / datetime value -- no checks necessary return [] # At this point, value is a datetime object. 
return self._check_if_value_fixed(value, now=now) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs["blank"] del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages["invalid_time"], code="invalid_time", params={"value": value}, ) raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return "" if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.TimeField, **kwargs, } ) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault("max_length", 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs["max_length"] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. return super().formfield( **{ "form_class": forms.URLField, **kwargs, } ) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b""] def __init__(self, *args, **kwargs): kwargs.setdefault("editable", False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. 
Use bytes " "content instead.", obj=self, id="fields.E170", ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs["editable"] = True else: del kwargs["editable"] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == "": return b"" return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode("ascii") def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode("ascii"))) return value class UUIDField(Field): default_error_messages = { "invalid": _("“%(value)s” is not a valid UUID."), } description = _("Universally unique identifier") empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs["max_length"] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["max_length"] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = "int" if isinstance(value, int) else "hex" try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) return value def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.UUIDField, **kwargs, } ) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs["blank"] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( "AutoFields must set primary_key=True.", obj=self, id="fields.E100", ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["blank"] kwargs["primary_key"] = True return name, path, args, kwargs def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def contribute_to_class(self, cls, name, **kwargs): if cls._meta.auto_field: raise ValueError( "Model %s can't have more than one auto-generated field." % cls._meta.label ) super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class AutoFieldMeta(type): """ Metaclass to maintain backward inheritance compatibility for AutoField. 
It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields. """ @property def _subclasses(self): return (BigAutoField, SmallAutoField) def __instancecheck__(self, instance): return isinstance(instance, self._subclasses) or super().__instancecheck__( instance ) def __subclasscheck__(self, subclass): return issubclass(subclass, self._subclasses) or super().__subclasscheck__( subclass ) class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta): def get_internal_type(self): return "AutoField" def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) class BigAutoField(AutoFieldMixin, BigIntegerField): def get_internal_type(self): return "BigAutoField" def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class SmallAutoField(AutoFieldMixin, SmallIntegerField): def get_internal_type(self): return "SmallAutoField" def rel_db_type(self, connection): return SmallIntegerField().db_type(connection=connection)
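# ---------------------------------------------------------------------------
# Illustrative sketch (added for this review; not part of Django's source).
# It exercises the value-coercion behaviour of the field classes defined
# above; the helper name below is hypothetical.
def _field_coercion_demo():
    import decimal

    # BooleanField.to_python() normalizes the common string spellings and,
    # when null=True, maps empty values to None instead of raising.
    bf = BooleanField(null=True)
    assert bf.to_python("t") is True
    assert bf.to_python("0") is False
    assert bf.to_python("") is None

    # DecimalField.to_python() builds a Decimal; float inputs go through a
    # decimal.Context capped at max_digits.
    df = DecimalField(max_digits=6, decimal_places=2)
    assert df.to_python("12.50") == decimal.Decimal("12.50")

    # AutoFieldMeta keeps isinstance() working for the whole auto-field
    # family even though BigAutoField/SmallAutoField do not subclass
    # AutoField directly.
    assert isinstance(BigAutoField(primary_key=True), AutoField)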
castiel248/Convert
Lib/site-packages/django/db/models/fields/__init__.py
Python
mit
95,674
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.core.files.utils import validate_file_name from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import AltersData from django.utils.translation import gettext_lazy as _ class FieldFile(File, AltersData): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatible. if hasattr(other, "name"): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError( "The '%s' attribute has no file associated with it." % self.field.name ) def _get_file(self): self._require_file() if getattr(self, "_file", None) is None: self._file = self.storage.open(self.name, "rb") return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode="rb"): self._require_file() if getattr(self, "_file", None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.attname, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, "_file"): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.attname, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, "_file", None) return file is None or file.closed def close(self): file = getattr(self, "_file", None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { "name": self.name, "closed": False, "_committed": True, "_file": None, "instance": self.instance, "field": self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, "field"): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__( self, verbose_name=None, name=None, upload_to="", storage=None, **kwargs ): self._primary_key_set_explicitly = "primary_key" in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % ( self.__class__.__qualname__, Storage.__module__, Storage.__qualname__, ) ) self.upload_to = upload_to kwargs.setdefault("max_length", 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id="fields.E201", ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith("/"): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id="fields.E202", hint="Remove the leading slash.", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs["upload_to"] = self.upload_to storage = getattr(self, "_storage_callable", self.storage) if storage is not default_storage: kwargs["storage"] = storage return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for # database insertion. if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
setattr(instance, self.name, data or "") def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.FileField, "max_length": self.max_length, **kwargs, } ) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, "_dimensions_cache"): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__( self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs, ): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( "Cannot use ImageField because Pillow is not installed.", hint=( "Get Pillow at https://pypi.org/project/Pillow/ " 'or run command "python -m pip install Pillow".' ), obj=self, id="fields.E210", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs["width_field"] = self.width_field if self.height_field: kwargs["height_field"] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not ( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.ImageField, **kwargs, } )
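# ---------------------------------------------------------------------------
# Illustrative sketch (added for this review; not part of Django's source).
# A hypothetical model showing how the pieces above fit together: a callable
# upload_to feeds generate_filename(), and width_field/height_field are kept
# in sync by update_dimension_fields() via the post_init signal and
# ImageFileDescriptor.__set__. The names "Profile" and "avatar_path" are
# made up for the example.
#
#   def avatar_path(instance, filename):
#       # upload_to callables receive the instance and the original
#       # filename and return a storage-relative, forward-slash path.
#       return "avatars/%s/%s" % (instance.pk or "new", filename)
#
#   class Profile(models.Model):
#       avatar = models.ImageField(
#           upload_to=avatar_path,
#           width_field="avatar_width",
#           height_field="avatar_height",
#       )
#       avatar_width = models.PositiveIntegerField(null=True)
#       avatar_height = models.PositiveIntegerField(null=True)
#
#   Assigning a plain File marks the FieldFile uncommitted; the bytes are
#   only written to storage by FileField.pre_save() when the model is saved:
#
#   >>> profile.avatar = File(open("photo.png", "rb"))
#   >>> profile.save()  # commits the file, then dimension fields update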
castiel248/Convert
Lib/site-packages/django/db/models/fields/files.py
Python
mit
18,842
import json import warnings from django import forms from django.core import checks, exceptions from django.db import NotSupportedError, connections, router from django.db.models import expressions, lookups from django.db.models.constants import LOOKUP_SEP from django.db.models.fields import TextField from django.db.models.lookups import ( FieldGetDbPrepValueMixin, PostgresOperatorLookup, Transform, ) from django.utils.deprecation import RemovedInDjango51Warning from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import CheckFieldDefaultMixin __all__ = ["JSONField"] class JSONField(CheckFieldDefaultMixin, Field): empty_strings_allowed = False description = _("A JSON object") default_error_messages = { "invalid": _("Value must be valid JSON."), } _default_hint = ("dict", "{}") def __init__( self, verbose_name=None, name=None, encoder=None, decoder=None, **kwargs, ): if encoder and not callable(encoder): raise ValueError("The encoder parameter must be a callable object.") if decoder and not callable(decoder): raise ValueError("The decoder parameter must be a callable object.") self.encoder = encoder self.decoder = decoder super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) databases = kwargs.get("databases") or [] errors.extend(self._check_supported(databases)) return errors def _check_supported(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if ( self.model._meta.required_db_vendor and self.model._meta.required_db_vendor != connection.vendor ): continue if not ( "supports_json_field" in self.model._meta.required_db_features or connection.features.supports_json_field ): errors.append( checks.Error( "%s does not support JSONFields." % connection.display_name, obj=self.model, id="fields.E180", ) ) return errors def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.encoder is not None: kwargs["encoder"] = self.encoder if self.decoder is not None: kwargs["decoder"] = self.decoder return name, path, args, kwargs def from_db_value(self, value, expression, connection): if value is None: return value # Some backends (SQLite at least) extract non-string values in their # SQL datatypes. if isinstance(expression, KeyTransform) and not isinstance(value, str): return value try: return json.loads(value, cls=self.decoder) except json.JSONDecodeError: return value def get_internal_type(self): return "JSONField" def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) # RemovedInDjango51Warning: When the deprecation ends, replace with: # if ( # isinstance(value, expressions.Value) # and isinstance(value.output_field, JSONField) # ): # value = value.value # elif hasattr(value, "as_sql"): ... if isinstance(value, expressions.Value): if isinstance(value.value, str) and not isinstance( value.output_field, JSONField ): try: value = json.loads(value.value, cls=self.decoder) except json.JSONDecodeError: value = value.value else: warnings.warn( "Providing an encoded JSON string via Value() is deprecated. 
" f"Use Value({value!r}, output_field=JSONField()) instead.", category=RemovedInDjango51Warning, ) elif isinstance(value.output_field, JSONField): value = value.value else: return value elif hasattr(value, "as_sql"): return value return connection.ops.adapt_json_value(value, self.encoder) def get_db_prep_save(self, value, connection): if value is None: return value return self.get_db_prep_value(value, connection) def get_transform(self, name): transform = super().get_transform(name) if transform: return transform return KeyTransformFactory(name) def validate(self, value, model_instance): super().validate(value, model_instance) try: json.dumps(value, cls=self.encoder) except TypeError: raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def value_to_string(self, obj): return self.value_from_object(obj) def formfield(self, **kwargs): return super().formfield( **{ "form_class": forms.JSONField, "encoder": self.encoder, "decoder": self.decoder, **kwargs, } ) def compile_json_path(key_transforms, include_root=True): path = ["$"] if include_root else [] for key_transform in key_transforms: try: num = int(key_transform) except ValueError: # non-integer path.append(".") path.append(json.dumps(key_transform)) else: path.append("[%s]" % num) return "".join(path) class DataContains(FieldGetDbPrepValueMixin, PostgresOperatorLookup): lookup_name = "contains" postgres_operator = "@>" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contains lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params class ContainedBy(FieldGetDbPrepValueMixin, PostgresOperatorLookup): lookup_name = "contained_by" postgres_operator = "<@" def as_sql(self, compiler, connection): if not connection.features.supports_json_field_contains: raise NotSupportedError( "contained_by lookup is not supported on this database backend." ) lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(rhs_params) + tuple(lhs_params) return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params class HasKeyLookup(PostgresOperatorLookup): logical_operator = None def compile_json_path_final_key(self, key_transform): # Compile the final key without interpreting ints as array elements. return ".%s" % json.dumps(key_transform) def as_sql(self, compiler, connection, template=None): # Process JSON path from the left-hand side. if isinstance(self.lhs, KeyTransform): lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs( compiler, connection ) lhs_json_path = compile_json_path(lhs_key_transforms) else: lhs, lhs_params = self.process_lhs(compiler, connection) lhs_json_path = "$" sql = template % lhs # Process JSON path from the right-hand side. rhs = self.rhs rhs_params = [] if not isinstance(rhs, (list, tuple)): rhs = [rhs] for key in rhs: if isinstance(key, KeyTransform): *_, rhs_key_transforms = key.preprocess_lhs(compiler, connection) else: rhs_key_transforms = [key] *rhs_key_transforms, final_key = rhs_key_transforms rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False) rhs_json_path += self.compile_json_path_final_key(final_key) rhs_params.append(lhs_json_path + rhs_json_path) # Add condition for each key. 
if self.logical_operator: sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params)) return sql, tuple(lhs_params) + tuple(rhs_params) def as_mysql(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)" ) def as_oracle(self, compiler, connection): sql, params = self.as_sql( compiler, connection, template="JSON_EXISTS(%s, '%%s')" ) # Add paths directly into SQL because path expressions cannot be passed # as bind variables on Oracle. return sql % tuple(params), [] def as_postgresql(self, compiler, connection): if isinstance(self.rhs, KeyTransform): *_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection) for key in rhs_key_transforms[:-1]: self.lhs = KeyTransform(key, self.lhs) self.rhs = rhs_key_transforms[-1] return super().as_postgresql(compiler, connection) def as_sqlite(self, compiler, connection): return self.as_sql( compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL" ) class HasKey(HasKeyLookup): lookup_name = "has_key" postgres_operator = "?" prepare_rhs = False class HasKeys(HasKeyLookup): lookup_name = "has_keys" postgres_operator = "?&" logical_operator = " AND " def get_prep_lookup(self): return [str(item) for item in self.rhs] class HasAnyKeys(HasKeys): lookup_name = "has_any_keys" postgres_operator = "?|" logical_operator = " OR " class HasKeyOrArrayIndex(HasKey): def compile_json_path_final_key(self, key_transform): return compile_json_path([key_transform], include_root=False) class CaseInsensitiveMixin: """ Mixin to allow case-insensitive comparison of JSON values on MySQL. MySQL handles strings used in JSON context using the utf8mb4_bin collation. Because utf8mb4_bin is a binary collation, comparison of JSON values is case-sensitive. """ def process_lhs(self, compiler, connection): lhs, lhs_params = super().process_lhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % lhs, lhs_params return lhs, lhs_params def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "mysql": return "LOWER(%s)" % rhs, rhs_params return rhs, rhs_params class JSONExact(lookups.Exact): can_use_none_as_rhs = True def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) # Treat None lookup values as null. if rhs == "%s" and rhs_params == [None]: rhs_params = ["null"] if connection.vendor == "mysql": func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params) rhs %= tuple(func) return rhs, rhs_params class JSONIContains(CaseInsensitiveMixin, lookups.IContains): pass JSONField.register_lookup(DataContains) JSONField.register_lookup(ContainedBy) JSONField.register_lookup(HasKey) JSONField.register_lookup(HasKeys) JSONField.register_lookup(HasAnyKeys) JSONField.register_lookup(JSONExact) JSONField.register_lookup(JSONIContains) class KeyTransform(Transform): postgres_operator = "->" postgres_nested_operator = "#>" def __init__(self, key_name, *args, **kwargs): super().__init__(*args, **kwargs) self.key_name = str(key_name) def preprocess_lhs(self, compiler, connection): key_transforms = [self.key_name] previous = self.lhs while isinstance(previous, KeyTransform): key_transforms.insert(0, previous.key_name) previous = previous.lhs lhs, params = compiler.compile(previous) if connection.vendor == "oracle": # Escape string-formatting. 
key_transforms = [key.replace("%", "%%") for key in key_transforms] return lhs, params, key_transforms def as_mysql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,) def as_oracle(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return ( "COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))" % ((lhs, json_path) * 2) ), tuple(params) * 2 def as_postgresql(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) if len(key_transforms) > 1: sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator) return sql, tuple(params) + (key_transforms,) try: lookup = int(self.key_name) except ValueError: lookup = self.key_name return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,) def as_sqlite(self, compiler, connection): lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) datatype_values = ",".join( [repr(datatype) for datatype in connection.ops.jsonfield_datatype_values] ) return ( "(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) " "THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)" ) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3 class KeyTextTransform(KeyTransform): postgres_operator = "->>" postgres_nested_operator = "#>>" output_field = TextField() def as_mysql(self, compiler, connection): if connection.mysql_is_mariadb: # MariaDB doesn't support -> and ->> operators (see MDEV-13594). sql, params = super().as_mysql(compiler, connection) return "JSON_UNQUOTE(%s)" % sql, params else: lhs, params, key_transforms = self.preprocess_lhs(compiler, connection) json_path = compile_json_path(key_transforms) return "(%s ->> %%s)" % lhs, tuple(params) + (json_path,) @classmethod def from_lookup(cls, lookup): transform, *keys = lookup.split(LOOKUP_SEP) if not keys: raise ValueError("Lookup must contain key or index transforms.") for key in keys: transform = cls(key, transform) return transform KT = KeyTextTransform.from_lookup class KeyTransformTextLookupMixin: """ Mixin for combining with a lookup expecting a text lhs from a JSONField key lookup. On PostgreSQL, make use of the ->> operator instead of casting key values to text and performing the lookup on the resulting representation. """ def __init__(self, key_transform, *args, **kwargs): if not isinstance(key_transform, KeyTransform): raise TypeError( "Transform should be an instance of KeyTransform in order to " "use this lookup." ) key_text_transform = KeyTextTransform( key_transform.key_name, *key_transform.source_expressions, **key_transform.extra, ) super().__init__(key_text_transform, *args, **kwargs) class KeyTransformIsNull(lookups.IsNull): # key__isnull=False is the same as has_key='key' def as_oracle(self, compiler, connection): sql, params = HasKeyOrArrayIndex( self.lhs.lhs, self.lhs.key_name, ).as_oracle(compiler, connection) if not self.rhs: return sql, params # Column doesn't have a key or IS NULL. 
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection) return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params) def as_sqlite(self, compiler, connection): template = "JSON_TYPE(%s, %%s) IS NULL" if not self.rhs: template = "JSON_TYPE(%s, %%s) IS NOT NULL" return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql( compiler, connection, template=template, ) class KeyTransformIn(lookups.In): def resolve_expression_parameter(self, compiler, connection, sql, param): sql, params = super().resolve_expression_parameter( compiler, connection, sql, param, ) if ( not hasattr(param, "as_sql") and not connection.features.has_native_json_field ): if connection.vendor == "oracle": value = json.loads(param) sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" if isinstance(value, (list, dict)): sql %= "JSON_QUERY" else: sql %= "JSON_VALUE" elif connection.vendor == "mysql" or ( connection.vendor == "sqlite" and params[0] not in connection.ops.jsonfield_datatype_values ): sql = "JSON_EXTRACT(%s, '$')" if connection.vendor == "mysql" and connection.mysql_is_mariadb: sql = "JSON_UNQUOTE(%s)" % sql return sql, params class KeyTransformExact(JSONExact): def process_rhs(self, compiler, connection): if isinstance(self.rhs, KeyTransform): return super(lookups.Exact, self).process_rhs(compiler, connection) rhs, rhs_params = super().process_rhs(compiler, connection) if connection.vendor == "oracle": func = [] sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')" for value in rhs_params: value = json.loads(value) if isinstance(value, (list, dict)): func.append(sql % "JSON_QUERY") else: func.append(sql % "JSON_VALUE") rhs %= tuple(func) elif connection.vendor == "sqlite": func = [] for value in rhs_params: if value in connection.ops.jsonfield_datatype_values: func.append("%s") else: func.append("JSON_EXTRACT(%s, '$')") rhs %= tuple(func) return rhs, rhs_params def as_oracle(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if rhs_params == ["null"]: # Field has key and it's NULL. 
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name) has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection) is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True) is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection) return ( "%s AND %s" % (has_key_sql, is_null_sql), tuple(has_key_params) + tuple(is_null_params), ) return super().as_sql(compiler, connection) class KeyTransformIExact( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact ): pass class KeyTransformIContains( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains ): pass class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith): pass class KeyTransformIStartsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith ): pass class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith): pass class KeyTransformIEndsWith( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith ): pass class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex): pass class KeyTransformIRegex( CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex ): pass class KeyTransformNumericLookupMixin: def process_rhs(self, compiler, connection): rhs, rhs_params = super().process_rhs(compiler, connection) if not connection.features.has_native_json_field: rhs_params = [json.loads(value) for value in rhs_params] return rhs, rhs_params class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan): pass class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual): pass class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan): pass class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual): pass KeyTransform.register_lookup(KeyTransformIn) KeyTransform.register_lookup(KeyTransformExact) KeyTransform.register_lookup(KeyTransformIExact) KeyTransform.register_lookup(KeyTransformIsNull) KeyTransform.register_lookup(KeyTransformIContains) KeyTransform.register_lookup(KeyTransformStartsWith) KeyTransform.register_lookup(KeyTransformIStartsWith) KeyTransform.register_lookup(KeyTransformEndsWith) KeyTransform.register_lookup(KeyTransformIEndsWith) KeyTransform.register_lookup(KeyTransformRegex) KeyTransform.register_lookup(KeyTransformIRegex) KeyTransform.register_lookup(KeyTransformLt) KeyTransform.register_lookup(KeyTransformLte) KeyTransform.register_lookup(KeyTransformGt) KeyTransform.register_lookup(KeyTransformGte) class KeyTransformFactory: def __init__(self, key_name): self.key_name = key_name def __call__(self, *args, **kwargs): return KeyTransform(self.key_name, *args, **kwargs)
castiel248/Convert
Lib/site-packages/django/db/models/fields/json.py
Python
mit
22508
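A minimal usage sketch of the JSONField machinery defined in json.py above, assuming a configured Django project with a JSON-capable backend; the Dog model and its queries are hypothetical illustrations, not part of the file:

# Hypothetical example model; assumes a configured Django project.
from django.db import models
from django.db.models.fields.json import KT


class Dog(models.Model):
    data = models.JSONField(default=dict)


# Chained keys resolve to nested KeyTransform objects via KeyTransformFactory,
# and the trailing exact comparison compiles through KeyTransformExact.
bobs_dogs = Dog.objects.filter(data__owner__name="Bob")

# HasKey is registered as "has_key"; on SQLite it compiles to
# JSON_TYPE(%s, %s) IS NOT NULL, per HasKeyLookup.as_sqlite above.
owned = Dog.objects.filter(data__has_key="owner")

# KT() chains KeyTextTransform instances, so the annotated value is text
# (KeyTextTransform declares output_field = TextField()).
named = Dog.objects.annotate(owner_name=KT("data__owner__name"))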
from django.core import checks NOT_PROVIDED = object() class FieldCacheMixin: """Provide an API for working with the model's fields value cache.""" def get_cache_name(self): raise NotImplementedError def get_cached_value(self, instance, default=NOT_PROVIDED): cache_name = self.get_cache_name() try: return instance._state.fields_cache[cache_name] except KeyError: if default is NOT_PROVIDED: raise return default def is_cached(self, instance): return self.get_cache_name() in instance._state.fields_cache def set_cached_value(self, instance, value): instance._state.fields_cache[self.get_cache_name()] = value def delete_cached_value(self, instance): del instance._state.fields_cache[self.get_cache_name()] class CheckFieldDefaultMixin: _default_hint = ("<valid default>", "<invalid default>") def _check_default(self): if ( self.has_default() and self.default is not None and not callable(self.default) ): return [ checks.Warning( "%s default should be a callable instead of an instance " "so that it's not shared between all field instances." % (self.__class__.__name__,), hint=( "Use a callable instead, e.g., use `%s` instead of " "`%s`." % self._default_hint ), obj=self, id="fields.E010", ) ] else: return [] def check(self, **kwargs): errors = super().check(**kwargs) errors.extend(self._check_default()) return errors
castiel248/Convert
Lib/site-packages/django/db/models/fields/mixins.py
Python
mit
1823
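A short sketch of the fields.E010 warning that CheckFieldDefaultMixin._check_default() in mixins.py above produces; JSONField mixes this in and declares _default_hint = ("dict", "{}"). The Profile model is hypothetical:

# Hypothetical example model; assumes a configured Django project.
from django.db import models


class Profile(models.Model):
    # Flagged by _check_default(): a single dict instance would be
    # shared between all field instances.
    tags_bad = models.JSONField(default={})
    # Passes the check: a callable produces a fresh dict per instance,
    # matching the hint text built from _default_hint.
    tags_good = models.JSONField(default=dict)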
""" Field-like classes that aren't really fields. It's easier to use objects that have the same attributes as fields sometimes (avoids a lot of special casing). """ from django.db.models import fields class OrderWrt(fields.IntegerField): """ A proxy for the _order database field that is used when Meta.order_with_respect_to is specified. """ def __init__(self, *args, **kwargs): kwargs["name"] = "_order" kwargs["editable"] = False super().__init__(*args, **kwargs)
castiel248/Convert
Lib/site-packages/django/db/models/fields/proxy.py
Python
mit
515
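A brief sketch of where the OrderWrt proxy field in proxy.py above comes into play, with hypothetical Question/Answer models: declaring Meta.order_with_respect_to makes Django inject the hidden, non-editable "_order" integer column that OrderWrt represents:

# Hypothetical example models; assumes a configured Django project.
from django.db import models


class Question(models.Model):
    text = models.TextField()


class Answer(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)

    class Meta:
        # Adds an OrderWrt field named "_order" (editable=False) to Answer.
        order_with_respect_to = "question"


# Django also generates ordering helpers on the related model, e.g.
# question.get_answer_order() and question.set_answer_order(id_list).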
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference, settings from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel RECURSIVE_RELATIONSHIP_CONSTANT = "self" def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False def __init__( self, related_name=None, related_query_name=None, limit_choices_to=None, **kwargs, ): self._related_name = related_name self._related_query_name = related_query_name self._limit_choices_to = limit_choices_to super().__init__(**kwargs) @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = ( not keyword.iskeyword(related_name) and related_name.isidentifier() ) if not (is_valid_id or related_name.endswith("+")): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % ( self.remote_field.related_name, self.model._meta.object_name, self.name, ), hint=( "Related name must be a valid Python identifier or end with a " "'+'" ), obj=self, id="fields.E306", ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith("_"): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % rel_query_name, hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E308", ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=( "Add or change a related_name or related_query_name " "argument for this field." ), obj=self, id="fields.E309", ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = ( self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name ) if rel_is_missing and ( rel_is_string or not self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id="fields.E300", ) ] return [] def _check_referencing_to_swapped_model(self): if ( self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped ): return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % self.remote_field.model._meta.label, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id="fields.E301", ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # f.remote_field.model may be a string instead of a model. Skip if # model name is not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. 
"model_set" rel_query_name = self.related_query_name() # i. e. "model" # i.e. "app_label.Model.field". field_name = "%s.%s" % (opts.label, self.name) # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: # i.e. "app_label.Target.model_set". clash_name = "%s.%s" % (rel_opts.label, clash_field.name) if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with field name " f"'{clash_name}'.", hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E302", ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=( "Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'." ) % (clash_name, field_name), obj=self, id="fields.E303", ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: # i.e. "app_label.Model.m2m". clash_name = "%s.%s" % ( clash_field.related_model._meta.label, clash_field.field.name, ) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( f"Reverse accessor '{rel_opts.object_name}.{rel_name}' " f"for '{field_name}' clashes with reverse accessor for " f"'{clash_name}'.", hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E304", ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name " "for '%s'." % (field_name, clash_name), hint=( "Add or change a related_name argument " "to the definition for '%s' or '%s'." ) % (field_name, clash_name), obj=self, id="fields.E305", ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name %= { "class": cls.__name__.lower(), "model_name": cls._meta.model_name.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { "class": cls.__name__.lower(), "app_label": cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation( resolve_related_class, cls, self.remote_field.model, field=self ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self._limit_choices_to: kwargs["limit_choices_to"] = self._limit_choices_to if self._related_name is not None: kwargs["related_name"] = self._related_name if self._related_query_name is not None: kwargs["related_query_name"] = self._related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { "%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_q = Q.create( [ (rh_field.attname, getattr(obj, lh_field.attname)) for lh_field, rh_field in self.related_fields ] ) descriptor_filter = self.get_extra_descriptor_filter(obj) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = self.name or ( self.remote_field.model._meta.model_name + "_" + self.remote_field.model._meta.pk.name ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, "get_related_field"): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update( { "limit_choices_to": limit_choices_to, } ) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return ( self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name ) @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.path_infos[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field " "was asked for" ) return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__( self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs, ): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__( rel=rel, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def __copy__(self): obj = super().__copy__() # Remove any cached PathInfo values. obj.__dict__.pop("path_infos", None) obj.__dict__.pop("reverse_path_infos", None) return obj def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id="fields.E312", ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, "unique", False) } unique_foreign_fields.update( {frozenset(ut) for ut in self.remote_field.model._meta.unique_together} ) unique_foreign_fields.update( { frozenset(uc.fields) for uc in self.remote_field.model._meta.total_unique_constraints } ) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ", ".join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( "Mark a single field as unique=True or add a set of " "fields to a unique constraint (via unique_together " "or a UniqueConstraint (without condition) in the " "model Meta.constraints)." ), obj=self, id="fields.E310", ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must be unique because it is referenced by " "a foreign key." % (model_name, field_name), hint=( "Add unique=True to this field or add a " "UniqueConstraint (without condition) in the model " "Meta.constraints." ), obj=self, id="fields.E311", ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs["on_delete"] = self.remote_field.on_delete kwargs["from_fields"] = self.from_fields kwargs["to_fields"] = self.to_fields if self.remote_field.parent_link: kwargs["parent_link"] = self.remote_field.parent_link if isinstance(self.remote_field.model, str): if "." in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower # If swappable is True, then see if we're actually pointing to the target # of a swap. 
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) # Set it kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError( "Foreign Object from and to fields must be the same non-zero length" ) if isinstance(self.remote_field.model, str): raise ValueError( "Related model %r cannot be resolved" % self.remote_field.model ) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = ( self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name) ) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple( rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field ) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if ( not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract ): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple( (lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source ) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, ) ] @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [ PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, ) ] @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() @classmethod @functools.lru_cache(maxsize=None) def get_class_lookups(cls): bases = inspect.getmro(cls) bases = bases[: bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related), ) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append( self.remote_field.limit_choices_to ) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. 
""" descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { "invalid": _("%(model)s instance with %(field)s %(value)r does not exist.") } description = _("Foreign Key (type determined by related field)") def __init__( self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs, ): try: to._meta.model_name except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ForeignKey must be " "either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError("on_delete must be callable.") kwargs["rel"] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault("db_index", True) super().__init__( to, on_delete, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def __class_getitem__(cls, *args, **kwargs): return cls def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, "on_delete", None) if on_delete == SET_NULL and not self.null: return [ checks.Error( "Field specifies on_delete=SET_NULL, but cannot be null.", hint=( "Set null=True argument on the field, or change the on_delete " "rule." ), obj=self, id="fields.E320", ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( "Field specifies on_delete=SET_DEFAULT, but has no default value.", hint="Set a default value, or change the on_delete rule.", obj=self, id="fields.E321", ) ] else: return [] def _check_unique(self, **kwargs): return ( [ checks.Warning( "Setting unique=True on a ForeignKey has the same effect as using " "a OneToOneField.", hint=( "ForeignKey(unique=True) is usually better served by a " "OneToOneField." ), obj=self, id="fields.W342", ) ] if self.unique else [] ) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs["to_fields"] del kwargs["from_fields"] # Handle the simpler arguments if self.db_index: del kwargs["db_index"] else: kwargs["db_index"] = False if self.db_constraint is not True: kwargs["db_constraint"] = self.db_constraint # Rel needs more work. 
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name) ): kwargs["to_field"] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._base_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages["invalid"], code="invalid", params={ "model": self.remote_field.model._meta.verbose_name, "pk": value, "field": self.remote_field.field_name, "value": value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if ( to_field and to_field.model != self.remote_field.model._meta.concrete_model ): raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return "%s_id" % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or ( value == "" and ( not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls ) ): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError( "Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model) ) return super().formfield( **{ "form_class": forms.ModelChoiceField, "queryset": self.remote_field.model._default_manager.using(using), "to_field_name": self.remote_field.field_name, **kwargs, "blank": self.blank, } ) def db_check(self, connection): return None def db_type(self, connection): return self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): target_db_parameters = self.target_field.db_parameters(connection) return { "type": self.db_type(connection), "check": self.db_check(connection), "collation": target_db_parameters.get("collation"), } def convert_empty_strings(self, value, expression, connection): if (not value) and 
isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError("Cannot resolve output_field.") return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs["unique"] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs["unique"] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = "%s_%s" % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = "to_%s" % to from_ = "from_%s" % from_ meta = type( "Meta", (), { "db_table": field._get_m2m_db_table(klass._meta), "auto_created": klass, "app_label": klass._meta.app_label, "db_tablespace": klass._meta.db_tablespace, "unique_together": (from_, to), "verbose_name": _("%(from)s-%(to)s relationship") % {"from": from_, "to": to}, "verbose_name_plural": _("%(from)s-%(to)s relationships") % {"from": from_, "to": to}, "apps": field.model._meta.apps, }, ) # Construct and return the new class. return type( name, (models.Model,), { "Meta": meta, "__module__": klass.__module__, from_: models.ForeignKey( klass, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name="%s+" % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), }, ) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. 
Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__( self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs, ): try: to._meta except AttributeError: if not isinstance(to, str): raise TypeError( "%s(%r) is invalid. First parameter to ManyToManyField " "must be either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) if symmetrical is None: symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT if through is not None and db_table is not None: raise ValueError( "Cannot specify a db_table if an intermediary model is used." ) kwargs["rel"] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = "null" in kwargs super().__init__( related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( "ManyToManyFields cannot be unique.", obj=self, id="fields.E330", ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( "null has no effect on ManyToManyField.", obj=self, id="fields.W340", ) ) if self._validators: warnings.append( checks.Warning( "ManyToManyField does not support validators.", obj=self, id="fields.W341", ) ) if self.remote_field.symmetrical and self._related_name: warnings.append( checks.Warning( "related_name has no effect on ManyToManyField " 'with a symmetrical relationship, e.g. to "self".', obj=self, id="fields.W345", ) ) if self.db_comment: warnings.append( checks.Warning( "db_comment has no effect on ManyToManyField.", obj=self, id="fields.W346", ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, "_meta"): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__, ) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models( include_auto_created=True ): # The relationship model is not installed. errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." % qualified_model_name, obj=self, id="fields.E331", ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." 
) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint=( "Use through_fields to specify which two foreign keys " "Django should use." ), obj=self.remote_field.through, id="fields.E333", ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, "model", None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." ) % (self, from_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E334", ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( "If you want to create a recursive relationship, " 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id="fields.E335", ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % (self, from_model_name, to_model_name), obj=self.remote_field.through, id="fields.E336", ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". if not ( len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1] ): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." % qualified_model_name, hint=( "Make sure you specify 'through_fields' as " "through_fields=('field1', 'field2')" ), obj=self, id="fields.E337", ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. 
else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = ( from_model, self.remote_field.through, self.remote_field.model, ) source_field_name, target_field_name = self.remote_field.through_fields[ :2 ] for field_name, related_model in ( (source_field_name, source), (target_field_name, target), ): possible_field_names = [] for f in through._meta.fields: if ( hasattr(f, "remote_field") and getattr(f.remote_field, "model", None) == related_model ): possible_field_names.append(f.name) if possible_field_names: hint = ( "Did you mean one of the following foreign keys to '%s': " "%s?" % ( related_model._meta.object_name, ", ".join(possible_field_names), ) ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id="fields.E338", ) ) else: if not ( hasattr(field, "remote_field") and getattr(field.remote_field, "model", None) == related_model ): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id="fields.E339", ) ) return errors def _check_table_uniqueness(self, **kwargs): if ( isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed ): return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if ( model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model ): if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = "%s.%s" % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, "fields.W344" error_hint = ( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of %r is correctly routed to a separate " "database." % clashing_obj ) else: error_class, error_id = checks.Error, "fields.E340" error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs["db_table"] = self.db_table if self.remote_field.db_constraint is not True: kwargs["db_constraint"] = self.remote_field.db_constraint # Lowercase model names as they should be treated as case-insensitive. if isinstance(self.remote_field.model, str): if "." 
in self.remote_field.model: app_label, model_name = self.remote_field.model.split(".") kwargs["to"] = "%s.%s" % (app_label, model_name.lower()) else: kwargs["to"] = self.remote_field.model.lower() else: kwargs["to"] = self.remote_field.model._meta.label_lower if getattr(self.remote_field, "through", None) is not None: if isinstance(self.remote_field.through, str): kwargs["through"] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs["through"] = self.remote_field.through._meta.label # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs["to"], "setting_name"): if kwargs["to"].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs["to"].setting_name, swappable_setting) ) kwargs["to"] = SettingsReference( kwargs["to"], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.reverse_path_infos if filtered_relation: join2infos = linkfield2.get_path_info(filtered_relation) else: join2infos = linkfield2.path_infos else: join1infos = linkfield2.reverse_path_infos if filtered_relation: join2infos = linkfield1.get_path_info(filtered_relation) else: join2infos = linkfield1.path_infos # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) @cached_property def path_infos(self): return self.get_path_info() def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) @cached_property def reverse_path_infos(self): return self.get_reverse_path_info() def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = "_m2m_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if ( f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name) ): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = "_m2m_reverse_%s_cache" % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_%s_+" % ( cls._meta.app_label, cls.__name__.lower(), name, ) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation( resolve_through_model, cls, self.remote_field.through, field=self ) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model( self, cls ) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. 
if ( not self.remote_field.is_hidden() and not related.related_model._meta.swapped ): setattr( cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True), ) # Set up the accessors for the column names on the m2m table. self.m2m_column_name = partial(self._get_m2m_attr, related, "column") self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column") self.m2m_field_name = partial(self._get_m2m_attr, related, "name") self.m2m_reverse_field_name = partial( self._get_m2m_reverse_attr, related, "name" ) get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field") self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial( self._get_m2m_reverse_attr, related, "remote_field" ) self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { "form_class": forms.ModelMultipleChoiceField, "queryset": self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get("initial") is not None: initial = defaults["initial"] if callable(initial): initial = initial() defaults["initial"] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
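# A minimal sketch, assuming hypothetical Person/Group/Membership models, of
# the explicit-through setup that _check_relationship_model() validates.
# Membership deliberately carries two foreign keys to Person; without
# through_fields that would fail the fields.E335 check above, and naming the
# (source, target) link fields resolves the ambiguity.
from django.db import models


class Person(models.Model):
    name = models.CharField(max_length=100)


class Group(models.Model):
    members = models.ManyToManyField(
        Person,
        through="Membership",
        # (source, target): the FK back to Group, then the FK to Person that
        # the relation should traverse.
        through_fields=("group", "person"),
    )


class Membership(models.Model):
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    inviter = models.ForeignKey(
        Person, on_delete=models.CASCADE, related_name="invited_memberships"
    )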
castiel248/Convert
Lib/site-packages/django/db/models/fields/related.py
Python
mit
75,796
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. 
""" from asgiref.sync import sync_to_async from django.core.exceptions import FieldError from django.db import ( DEFAULT_DB_ALIAS, NotSupportedError, connections, router, transaction, ) from django.db.models import Q, Window, signals from django.db.models.functions import RowNumber from django.db.models.lookups import GreaterThan, LessThanOrEqual from django.db.models.query import QuerySet from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import AltersData, resolve_callables from django.utils.functional import cached_property class ForeignKeyDeferredAttribute(DeferredAttribute): def __set__(self, instance, value): if instance.__dict__.get(self.field.attname) != value and self.field.is_cached( instance ): self.field.delete_cached_value(instance) instance.__dict__[self.field.attname] = value def _filter_prefetch_queryset(queryset, field_name, instances): predicate = Q(**{f"{field_name}__in": instances}) db = queryset._db or DEFAULT_DB_ALIAS if queryset.query.is_sliced: if not connections[db].features.supports_over_clause: raise NotSupportedError( "Prefetching from a limited queryset is only supported on backends " "that support window functions." ) low_mark, high_mark = queryset.query.low_mark, queryset.query.high_mark order_by = [ expr for expr, _ in queryset.query.get_compiler(using=db).get_order_by() ] window = Window(RowNumber(), partition_by=field_name, order_by=order_by) predicate &= GreaterThan(window, low_mark) if high_mark is not None: predicate &= LessThanOrEqual(window, high_mark) queryset.query.clear_limits() return queryset.filter(predicate) class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance. """ def __init__(self, field_with_rel): self.field = field_with_rel @cached_property def RelatedObjectDoesNotExist(self): # The exception can't be created at initialization time since the # related model might not be resolved yet; `self.field.model` might # still be a string model reference. return type( "RelatedObjectDoesNotExist", (self.field.remote_field.model.DoesNotExist, AttributeError), { "__module__": self.field.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.field.model.__qualname__, self.field.name, ), }, ) def is_cached(self, instance): return self.field.is_cached(instance) def get_queryset(self, **hints): return self.field.remote_field.model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.field.get_foreign_related_value instance_attr = self.field.get_local_related_value instances_dict = {instance_attr(inst): inst for inst in instances} related_field = self.field.foreign_related_fields[0] remote_field = self.field.remote_field # FIXME: This will need to be revisited when we introduce support for # composite fields. In the meantime we take this practical approach to # solve a regression on 1.6 when the reverse manager in hidden # (related_name ends with a '+'). Refs #21410. # The check for len(...) == 1 is a special case that allows the query # to be join-less and smaller. Refs #21760. 
if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1: query = { "%s__in" % related_field.name: {instance_attr(inst)[0] for inst in instances} } else: query = {"%s__in" % self.field.related_query_name(): instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. if not remote_field.multiple: for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] remote_field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False, ) def get_object(self, instance): qs = self.get_queryset(instance=instance) # Assuming the database enforces foreign keys, this won't fail. return qs.get(self.field.get_reverse_related_filter(instance)) def __get__(self, instance, cls=None): """ Get the related instance through the forward relation. With the example above, when getting ``child.parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``cls`` is the ``Child`` class (we don't need it) """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the reverse accessor (ReverseOneToOneDescriptor). try: rel_obj = self.field.get_cached_value(instance) except KeyError: has_value = None not in self.field.get_local_related_value(instance) ancestor_link = ( instance._meta.get_ancestor_link(self.field.model) if has_value else None ) if ancestor_link and ancestor_link.is_cached(instance): # An ancestor link will exist if this field is defined on a # multi-table inheritance parent of the instance's class. ancestor = ancestor_link.get_cached_value(instance) # The value might be cached on an ancestor if the instance # originated from walking down the inheritance chain. rel_obj = self.field.get_cached_value(ancestor, default=None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance( value, self.field.remote_field.model._meta.concrete_model ): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. 
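            # With multi-table inheritance, e.g. a hypothetical
            # Restaurant(Place) pair, this builds the Place instance from the
            # restaurant row already in memory instead of querying the parent
            # table again.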
if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} obj = rel_model(**kwargs) obj._state.adding = instance._state.adding obj._state.db = instance._state.db return obj return super().get_object(instance) def __set__(self, instance, value): super().__set__(instance, value) # If the primary key is a link to a parent model and a parent instance # is being set, update the value of the inherited pk(s). if self.field.primary_key and self.field.remote_field.parent_link: opts = instance._meta # Inherited primary key fields from this object's base classes. inherited_pk_fields = [ field for field in opts.concrete_fields if field.primary_key and field.remote_field ] for field in inherited_pk_fields: rel_model_pk_name = field.remote_field.model._meta.pk.attname raw_value = ( getattr(value, rel_model_pk_name) if value is not None else None ) setattr(instance, rel_model_pk_name, raw_value) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): # Following the example above, `related` is an instance of OneToOneRel # which represents the reverse restaurant field (place.restaurant). self.related = related @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. return type( "RelatedObjectDoesNotExist", (self.related.related_model.DoesNotExist, AttributeError), { "__module__": self.related.model.__module__, "__qualname__": "%s.%s.RelatedObjectDoesNotExist" % ( self.related.model.__qualname__, self.related.name, ), }, ) def is_cached(self, instance): return self.related.is_cached(instance) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.related.field.get_local_related_value instance_attr = self.related.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {"%s__in" % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] self.related.field.set_cached_value(rel_obj, instance) return ( queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False, ) def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). 
try: rel_obj = self.related.get_cached_value(instance) except KeyError: related_pk = instance.pk if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. self.related.field.set_cached_value(rel_obj, instance) self.related.set_cached_value(instance, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (instance.__class__.__name__, self.related.get_accessor_name()) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. # Following the example above, this would be the cached # ``restaurant`` instance (if any). rel_obj = self.related.get_cached_value(instance, default=None) if rel_obj is not None: # Remove the ``restaurant`` instance from the ``place`` # instance cache. self.related.delete_cached_value(instance) # Set the ``place`` field on the ``restaurant`` # instance to None. setattr(rel_obj, self.related.field.name, None) elif not isinstance(value, self.related.related_model): # An object must be an instance of the related class. raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.related.get_accessor_name(), self.related.related_model._meta.object_name, ) ) else: if instance._state.db is None: instance._state.db = router.db_for_write( instance.__class__, instance=value ) if value._state.db is None: value._state.db = router.db_for_write( value.__class__, instance=instance ) if not router.allow_relation(value, instance): raise ValueError( 'Cannot assign "%r": the current database router prevents this ' "relation." % value ) related_pk = tuple( getattr(instance, field.attname) for field in self.related.field.foreign_related_fields ) # Set the value of the related field to the value of the related # object's related field. for index, field in enumerate(self.related.field.local_related_fields): setattr(value, field.attname, related_pk[index]) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.related.set_cached_value(instance, value) # Set the forward accessor cache on the related object to the current # instance to avoid an extra SQL query if it's accessed later on. self.related.field.set_cached_value(value, instance) def __reduce__(self): # Same purpose as ForwardManyToOneDescriptor.__reduce__(). return getattr, (self.related.model, self.related.name) class ReverseManyToOneDescriptor: """ Accessor to the related objects manager on the reverse side of a many-to-one relation. 
In the example:: class Child(Model): parent = ForeignKey(Parent, related_name='children') ``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance. Most of the implementation is delegated to a dynamically defined manager class built by ``create_reverse_many_to_one_manager()`` defined below. """ def __init__(self, rel): self.rel = rel self.field = rel.field @cached_property def related_manager_cls(self): related_model = self.rel.related_model return create_reverse_many_to_one_manager( related_model._default_manager.__class__, self.rel, ) def __get__(self, instance, cls=None): """ Get the related objects through the reverse relation. With the example above, when getting ``parent.children``: - ``self`` is the descriptor managing the ``children`` attribute - ``instance`` is the ``parent`` instance - ``cls`` is the ``Parent`` class (unused) """ if instance is None: return self return self.related_manager_cls(instance) def _get_set_deprecation_msg_params(self): return ( "reverse side of a related set", self.rel.get_accessor_name(), ) def __set__(self, instance, value): raise TypeError( "Direct assignment to the %s is prohibited. Use %s.set() instead." % self._get_set_deprecation_msg_params(), ) def create_reverse_many_to_one_manager(superclass, rel): """ Create a manager for the reverse side of a many-to-one relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-one relations. """ class RelatedManager(superclass, AltersData): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _check_fk_val(self): for field in self.field.foreign_related_fields: if getattr(self.instance, field.attname) is None: raise ValueError( f'"{self.instance!r}" needs to have a value for field ' f'"{field.attname}" before this relationship can be used.' ) def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[ db ].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == "" and empty_strings_as_null): return queryset.none() if self.field.many_to_one: # Guard against field-like objects such as GenericRelation # that abuse create_reverse_many_to_one_manager() with reverse # one-to-many relationships instead and break known related # objects assignment. try: target_field = self.field.target_field except FieldError: # The relationship has multiple target fields. Use a tuple # for related object id.
rel_obj_id = tuple( [ getattr(self.instance, target_field.attname) for target_field in self.field.path_infos[-1].target_fields ] ) else: rel_obj_id = getattr(self.instance, target_field.attname) queryset._known_related_objects = { self.field: {rel_obj_id: self.instance} } return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop( self.field.remote_field.get_cache_name() ) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): # Even if this relation is not to the pk, we still require a pk value. # The hope is that the instance has already been saved to the DB, # although having a pk value isn't a guarantee of that. if self.instance.pk is None: raise ValueError( f"{self.instance.__class__.__name__!r} instance needs to have a " f"primary key value before this relationship can be used." ) try: return self.instance._prefetched_objects_cache[ self.field.remote_field.get_cache_name() ] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} queryset = _filter_prefetch_queryset(queryset, self.field.name, instances) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: if not self.field.is_cached(rel_obj): instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.remote_field.get_cache_name() return queryset, rel_obj_attr, instance_attr, False, cache_name, False def add(self, *objs, bulk=True): self._check_fk_val() self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first."
% obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update( **{ self.field.name: self.instance, } ) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True async def aadd(self, *objs, bulk=True): return await sync_to_async(self.add)(*objs, bulk=bulk) aadd.alters_data = True def create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) acreate.alters_data = True def get_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True async def aget_or_create(self, **kwargs): return await sync_to_async(self.get_or_create)(**kwargs) aget_or_create.alters_data = True def update_or_create(self, **kwargs): self._check_fk_val() kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True async def aupdate_or_create(self, **kwargs): return await sync_to_async(self.update_or_create)(**kwargs) aupdate_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a # value of null. if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return self._check_fk_val() val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: if not isinstance(obj, self.model): raise TypeError( "'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, ) ) # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True async def aremove(self, *objs, bulk=True): return await sync_to_async(self.remove)(*objs, bulk=bulk) aremove.alters_data = True def clear(self, *, bulk=True): self._check_fk_val() self._clear(self, bulk) clear.alters_data = True async def aclear(self, *, bulk=True): return await sync_to_async(self.clear)(bulk=bulk) aclear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): self._check_fk_val() # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. 
objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear(bulk=bulk) self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True async def aset(self, objs, *, bulk=True, clear=False): return await sync_to_async(self.set)(objs=objs, bulk=bulk, clear=clear) aset.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. """ def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( "%s side of a many-to-many set" % ("reverse" if self.reverse else "forward"), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations.
""" class ManyRelatedManager(superclass, AltersData): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = "%s__%s" % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError( '"%r" needs to have a value for field "%s" before ' "this many-to-many relationship can be used." % (instance, self.pk_field_names[self.source_field_name]) ) # Even if this relation is not to the pk, we still require a pk value. # The hope is that the instance has already been saved to the DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError( "%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__ ) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager( manager.__class__, rel, reverse ) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q.create([(self.source_field_name, self.related_val)]) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = ( not isinstance(removed_vals, QuerySet) or removed_vals._has_filters() ) if removed_vals_filters: filters &= Q.create([(f"{self.target_field_name}__in", removed_vals)]) if self.symmetrical: symmetrical_filters = Q.create( [(self.target_field_name, self.related_val)] ) if removed_vals_filters: symmetrical_filters &= Q.create( [(f"{self.source_field_name}__in", removed_vals)] ) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to.
""" queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) queryset = _filter_prefetch_queryset( queryset._next_is_sticky(), self.query_field_name, instances ) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra( select={ "_prefetch_related_val_%s" % f.attname: "%s.%s" % (qn(join_table), qn(f.column)) for f in fk.local_related_fields } ) return ( queryset, lambda result: tuple( getattr(result, "_prefetch_related_val_%s" % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True async def aadd(self, *objs, through_defaults=None): return await sync_to_async(self.add)( *objs, through_defaults=through_defaults ) aadd.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True async def aremove(self, *objs): return await sync_to_async(self.remove)(*objs) aremove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True async def aclear(self): return await sync_to_async(self.clear)() aclear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set( self.using(db).values_list( self.target_field.target_field.attname, flat=True ) ) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True async def aset(self, objs, *, clear=False, through_defaults=None): return await sync_to_async(self.set)( objs=objs, clear=clear, through_defaults=through_defaults ) aset.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True async def acreate(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.create)( through_defaults=through_defaults, **kwargs ) acreate.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create( **kwargs ) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. 
if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True async def aget_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( through_defaults=through_defaults, **kwargs ) aget_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super( ManyRelatedManager, self.db_manager(db) ).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True async def aupdate_or_create(self, *, through_defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( through_defaults=through_defaults, **kwargs ) aupdate_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. """ from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids( self, source_field_name, target_field_name, db, target_ids ): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = ( self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter( **{ source_field_name: self.related_val[0], "%s__in" % target_field_name: target_ids, } ) ) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( self.through._meta.auto_created is not False and connections[db].features.supports_ignore_conflicts ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = ( self.reverse or source_field_name == self.source_field_name ) and (signals.m2m_changed.has_listeners(self.through)) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. 
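            # For instance, an auto-created through model on a backend that
            # supports ignoring conflicts, with no m2m_changed listeners,
            # yields (True, False, True): a single bulk_create() and no
            # duplicate-filtering query or signals.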
return ( can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals), ) def _add_items( self, source_field_name, target_field_name, *objs, through_defaults=None ): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys # of object instances. if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan( db, source_field_name ) if can_fast_add: self.through._default_manager.using(db).bulk_create( [ self.through( **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, } ) for target_id in target_ids ], ignore_conflicts=True, ) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action="pre_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create( [ self.through( **through_defaults, **{ "%s_id" % source_field_name: self.related_val[0], "%s_id" % target_field_name: target_id, }, ) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts, ) if must_send_signals: signals.m2m_changed.send( sender=self.through, action="post_add", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter( **{"%s__in" % self.target_field.target_field.attname: old_ids} ) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
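# A quick orientation sketch of the machinery above, assuming the Child/Parent
# and Pizza/Topping models from the docstring examples:
#
#   child.parent              -> ForwardManyToOneDescriptor.__get__()
#   child.parent = parent     -> ForwardManyToOneDescriptor.__set__()
#   parent.children           -> ReverseManyToOneDescriptor.__get__(),
#                                which returns a RelatedManager
#   parent.children.add(obj)  -> RelatedManager.add(); with bulk=True this is
#                                a single UPDATE on the child table
#   pizza.toppings.set(objs)  -> ManyRelatedManager.set(); diffs existing ids
#                                against the new ones, then remove()/add()
#   pizza.toppings.through    -> the intermediary model, exposed by
#                                ManyToManyDescriptor.through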
castiel248/Convert
Lib/site-packages/django/db/models/fields/related_descriptors.py
Python
mit
61,824
import warnings from django.db.models.lookups import ( Exact, GreaterThan, GreaterThanOrEqual, In, IsNull, LessThan, LessThanOrEqual, ) from django.utils.deprecation import RemovedInDjango50Warning class MultiColSource: contains_aggregate = False contains_over_clause = False def __init__(self, alias, targets, sources, field): self.targets, self.sources, self.field, self.alias = ( targets, sources, field, alias, ) self.output_field = self.field def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.alias, self.field) def relabeled_clone(self, relabels): return self.__class__( relabels.get(self.alias, self.alias), self.targets, self.sources, self.field ) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def resolve_expression(self, *args, **kwargs): return self def get_normalized_value(value, lhs): from django.db.models import Model if isinstance(value, Model): if value.pk is None: # When the deprecation ends, replace with: # raise ValueError( # "Model instances passed to related filters must be saved." # ) warnings.warn( "Passing unsaved model instances to related filters is deprecated.", RemovedInDjango50Warning, ) value_list = [] sources = lhs.output_field.path_infos[-1].target_fields for source in sources: while not isinstance(value, source.model) and source.remote_field: source = source.remote_field.model._meta.get_field( source.remote_field.field_name ) try: value_list.append(getattr(value, source.attname)) except AttributeError: # A case like Restaurant.objects.filter(place=restaurant_instance), # where place is a OneToOneField and the primary key of Restaurant. return (value.pk,) return tuple(value_list) if not isinstance(value, tuple): return (value,) return value class RelatedIn(In): def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource): if self.rhs_is_direct_value(): # If we get here, we are dealing with single-column relations. self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs] # We need to run the related field's get_prep_value(). Consider # case ForeignKey to IntegerField given value 'abc'. The # ForeignKey itself doesn't have validation for non-integers, # so we must run validation using the target field. if hasattr(self.lhs.output_field, "path_infos"): # Run the target field's get_prep_value. We can safely # assume there is only one as we don't get to the direct # value branch otherwise. target_field = self.lhs.output_field.path_infos[-1].target_fields[ -1 ] self.rhs = [target_field.get_prep_value(v) for v in self.rhs] elif not getattr(self.rhs, "has_select_fields", True) and not getattr( self.lhs.field.target_field, "primary_key", False ): if ( getattr(self.lhs.output_field, "primary_key", False) and self.lhs.output_field.model == self.rhs.model ): # A case like # Restaurant.objects.filter(place__in=restaurant_qs), where # place is a OneToOneField and the primary key of # Restaurant. target_field = self.lhs.field.name else: target_field = self.lhs.field.target_field.name self.rhs.set_values([target_field]) return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): # For multicolumn lookups we need to build a multicolumn where clause. # This clause is either a SubqueryConstraint (for values that need # to be compiled to SQL) or an OR-combined list of # (col1 = val1 AND col2 = val2 AND ...) clauses. 
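            # For example, with hypothetical columns (col1, col2) and direct
            # rhs values [(1, "a"), (2, "b")], the direct-value branch below
            # builds: (col1 = 1 AND col2 = 'a') OR (col1 = 2 AND col2 = 'b')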
from django.db.models.sql.where import ( AND, OR, SubqueryConstraint, WhereNode, ) root_constraint = WhereNode(connector=OR) if self.rhs_is_direct_value(): values = [get_normalized_value(value, self.lhs) for value in self.rhs] for value in values: value_constraint = WhereNode() for source, target, val in zip( self.lhs.sources, self.lhs.targets, value ): lookup_class = target.get_lookup("exact") lookup = lookup_class( target.get_col(self.lhs.alias, source), val ) value_constraint.add(lookup, AND) root_constraint.add(value_constraint, OR) else: root_constraint.add( SubqueryConstraint( self.lhs.alias, [target.column for target in self.lhs.targets], [source.name for source in self.lhs.sources], self.rhs, ), AND, ) return root_constraint.as_sql(compiler, connection) return super().as_sql(compiler, connection) class RelatedLookupMixin: def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource) and not hasattr( self.rhs, "resolve_expression" ): # If we get here, we are dealing with single-column relations. self.rhs = get_normalized_value(self.rhs, self.lhs)[0] # We need to run the related field's get_prep_value(). Consider case # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself # doesn't have validation for non-integers, so we must run validation # using the target field. if self.prepare_rhs and hasattr(self.lhs.output_field, "path_infos"): # Get the target field. We can safely assume there is only one # as we don't get to the direct value branch otherwise. target_field = self.lhs.output_field.path_infos[-1].target_fields[-1] self.rhs = target_field.get_prep_value(self.rhs) return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): assert self.rhs_is_direct_value() self.rhs = get_normalized_value(self.rhs, self.lhs) from django.db.models.sql.where import AND, WhereNode root_constraint = WhereNode() for target, source, val in zip( self.lhs.targets, self.lhs.sources, self.rhs ): lookup_class = target.get_lookup(self.lookup_name) root_constraint.add( lookup_class(target.get_col(self.lhs.alias, source), val), AND ) return root_constraint.as_sql(compiler, connection) return super().as_sql(compiler, connection) class RelatedExact(RelatedLookupMixin, Exact): pass class RelatedLessThan(RelatedLookupMixin, LessThan): pass class RelatedGreaterThan(RelatedLookupMixin, GreaterThan): pass class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual): pass class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual): pass class RelatedIsNull(RelatedLookupMixin, IsNull): pass
castiel248/Convert
Lib/site-packages/django/db/models/fields/related_lookups.py
Python
mit
8,151
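In practice these lookups run whenever a related field is filtered against a model instance or a queryset. A rough sketch under a configured project; Author and Book are hypothetical models:

from django.db import models


class Author(models.Model):
    name = models.CharField(max_length=100)


class Book(models.Model):
    author = models.ForeignKey(Author, on_delete=models.CASCADE)

# Filtering with a model instance: get_normalized_value() reduces the
# instance to the value of the related target field (here Author.pk),
# and RelatedExact then prepares it with the target field's
# get_prep_value().
author = Author.objects.first()
books = Book.objects.filter(author=author)

# Filtering with a queryset: RelatedIn.get_prep_lookup() trims the
# subquery's SELECT list to the target column via set_values(), roughly
# whenever the queryset carries no explicit select fields of its own.
books = Book.objects.filter(author__in=Author.objects.filter(name__startswith="A"))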
""" "Rel objects" for related fields. "Rel objects" (for lack of a better name) carry information about the relation modeled by a related field and provide some utility functions. They're stored in the ``remote_field`` attribute of the field. They also act as reverse fields for the purposes of the Meta API because they're the closest concept currently available. """ from django.core import exceptions from django.utils.functional import cached_property from django.utils.hashable import make_hashable from . import BLANK_CHOICE_DASH from .mixins import FieldCacheMixin class ForeignObjectRel(FieldCacheMixin): """ Used by ForeignObject to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. """ # Field flags auto_created = True concrete = False editable = False is_relation = True # Reverse relations are always nullable (Django can't enforce that a # foreign key on the related model points to this model). null = True empty_strings_allowed = False def __init__( self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None, ): self.field = field self.model = to self.related_name = related_name self.related_query_name = related_query_name self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to self.parent_link = parent_link self.on_delete = on_delete self.symmetrical = False self.multiple = True # Some of the following cached_properties can't be initialized in # __init__ as the field doesn't have its model yet. Calling these methods # before field.contribute_to_class() has been called will result in # AttributeError @cached_property def hidden(self): return self.is_hidden() @cached_property def name(self): return self.field.related_query_name() @property def remote_field(self): return self.field @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.path_infos[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "Can't use target_field for multicolumn relations." ) return target_fields[0] @cached_property def related_model(self): if not self.field.model: raise AttributeError( "This property can't be accessed before self.field.contribute_to_class " "has been called." 
) return self.field.model @cached_property def many_to_many(self): return self.field.many_to_many @cached_property def many_to_one(self): return self.field.one_to_many @cached_property def one_to_many(self): return self.field.many_to_one @cached_property def one_to_one(self): return self.field.one_to_one def get_lookup(self, lookup_name): return self.field.get_lookup(lookup_name) def get_lookups(self): return self.field.get_lookups() def get_transform(self, name): return self.field.get_transform(name) def get_internal_type(self): return self.field.get_internal_type() @property def db_type(self): return self.field.db_type def __repr__(self): return "<%s: %s.%s>" % ( type(self).__name__, self.related_model._meta.app_label, self.related_model._meta.model_name, ) @property def identity(self): return ( self.field, self.model, self.related_name, self.related_query_name, make_hashable(self.limit_choices_to), self.parent_link, self.on_delete, self.symmetrical, self.multiple, ) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def __getstate__(self): state = self.__dict__.copy() # Delete the path_infos cached property because it can be recalculated # at first invocation after deserialization. The attribute must be # removed because subclasses like ManyToOneRel may have a PathInfo # which contains an intermediate M2M table that's been dynamically # created and doesn't exist in the .models module. # This is a reverse relation, so there is no reverse_path_infos to # delete. state.pop("path_infos", None) return state def get_choices( self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=(), ): """ Return choices with a default blank choices included, for use as <select> choices for this field. Analog of django.db.models.fields.Field.get_choices(), provided initially for utilization by RelatedFieldListFilter. """ limit_choices_to = limit_choices_to or self.limit_choices_to qs = self.related_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [(x.pk, str(x)) for x in qs] def is_hidden(self): """Should the related object be hidden?""" return bool(self.related_name) and self.related_name[-1] == "+" def get_joining_columns(self): return self.field.get_reverse_joining_columns() def get_extra_restriction(self, alias, related_alias): return self.field.get_extra_restriction(related_alias, alias) def set_field_name(self): """ Set the related field's name, this is not available until later stages of app loading, so set_field_name is called from set_attributes_from_rel() """ # By default foreign object doesn't relate to any remote field (for # example custom multicolumn joins currently have no remote field). self.field_name = None def get_accessor_name(self, model=None): # This method encapsulates the logic that decides what name to give an # accessor descriptor that retrieves related many-to-one or # many-to-many objects. It uses the lowercased object_name + "_set", # but this can be overridden with the "related_name" option. Due to # backwards compatibility ModelForms need to be able to provide an # alternate model. See BaseInlineFormSet.get_default_prefix(). opts = model._meta if model else self.related_model._meta model = model or self.related_model if self.multiple: # If this is a symmetrical m2m relation on self, there is no # reverse accessor. 
if self.symmetrical and model == self.model: return None if self.related_name: return self.related_name return opts.model_name + ("_set" if self.multiple else "") def get_path_info(self, filtered_relation=None): if filtered_relation: return self.field.get_reverse_path_info(filtered_relation) else: return self.field.reverse_path_infos @cached_property def path_infos(self): return self.get_path_info() def get_cache_name(self): """ Return the name of the cache key to use for storing an instance of the forward model on the reverse model. """ return self.get_accessor_name() class ManyToOneRel(ForeignObjectRel): """ Used by the ForeignKey field to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. Note: Because we somewhat abuse the Rel objects by using them as reverse fields we get the funny situation where ``ManyToOneRel.many_to_one == False`` and ``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual ManyToOneRel class is a private API and there is work underway to turn reverse relations into actual fields. """ def __init__( self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None, ): super().__init__( field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) self.field_name = field_name def __getstate__(self): state = super().__getstate__() state.pop("related_model", None) return state @property def identity(self): return super().identity + (self.field_name,) def get_related_field(self): """ Return the Field in the 'to' object to which this relationship is tied. """ field = self.model._meta.get_field(self.field_name) if not field.concrete: raise exceptions.FieldDoesNotExist( "No related field named '%s'" % self.field_name ) return field def set_field_name(self): self.field_name = self.field_name or self.model._meta.pk.name class OneToOneRel(ManyToOneRel): """ Used by OneToOneField to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. """ def __init__( self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None, ): super().__init__( field, to, field_name, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) self.multiple = False class ManyToManyRel(ForeignObjectRel): """ Used by ManyToManyField to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. 
""" def __init__( self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=True, through=None, through_fields=None, db_constraint=True, ): super().__init__( field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, ) if through and not db_constraint: raise ValueError("Can't supply a through model and db_constraint=False") self.through = through if through_fields and not through: raise ValueError("Cannot specify through_fields without a through model") self.through_fields = through_fields self.symmetrical = symmetrical self.db_constraint = db_constraint @property def identity(self): return super().identity + ( self.through, make_hashable(self.through_fields), self.db_constraint, ) def get_related_field(self): """ Return the field in the 'to' object to which this relationship is tied. Provided for symmetry with ManyToOneRel. """ opts = self.through._meta if self.through_fields: field = opts.get_field(self.through_fields[0]) else: for field in opts.fields: rel = getattr(field, "remote_field", None) if rel and rel.model == self.model: break return field.foreign_related_fields[0]
castiel248/Convert
Lib/site-packages/django/db/models/fields/reverse_related.py
Python
mit
12,480
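Rel objects are mostly consumed through _meta. A small sketch of inspecting the reverse side of a ForeignKey, reusing the hypothetical Author/Book pair from the previous sketch:

# Reverse relations show up in _meta.get_fields() as ManyToOneRel
# instances (auto_created=True, concrete=False).
rel = next(
    f for f in Author._meta.get_fields()
    if f.is_relation and f.auto_created and not f.concrete
)

print(rel.one_to_many)          # True -- the inversion documented above
print(rel.get_accessor_name())  # "book_set" unless related_name is set
print(rel.related_model)        # Book -- the model holding the FK
print(rel.remote_field is Book._meta.get_field("author"))  # True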
from .comparison import Cast, Coalesce, Collate, Greatest, JSONObject, Least, NullIf from .datetime import ( Extract, ExtractDay, ExtractHour, ExtractIsoWeekDay, ExtractIsoYear, ExtractMinute, ExtractMonth, ExtractQuarter, ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Now, Trunc, TruncDate, TruncDay, TruncHour, TruncMinute, TruncMonth, TruncQuarter, TruncSecond, TruncTime, TruncWeek, TruncYear, ) from .math import ( Abs, ACos, ASin, ATan, ATan2, Ceil, Cos, Cot, Degrees, Exp, Floor, Ln, Log, Mod, Pi, Power, Radians, Random, Round, Sign, Sin, Sqrt, Tan, ) from .text import ( MD5, SHA1, SHA224, SHA256, SHA384, SHA512, Chr, Concat, ConcatPair, Left, Length, Lower, LPad, LTrim, Ord, Repeat, Replace, Reverse, Right, RPad, RTrim, StrIndex, Substr, Trim, Upper, ) from .window import ( CumeDist, DenseRank, FirstValue, Lag, LastValue, Lead, NthValue, Ntile, PercentRank, Rank, RowNumber, ) __all__ = [ # comparison and conversion "Cast", "Coalesce", "Collate", "Greatest", "JSONObject", "Least", "NullIf", # datetime "Extract", "ExtractDay", "ExtractHour", "ExtractMinute", "ExtractMonth", "ExtractQuarter", "ExtractSecond", "ExtractWeek", "ExtractIsoWeekDay", "ExtractWeekDay", "ExtractIsoYear", "ExtractYear", "Now", "Trunc", "TruncDate", "TruncDay", "TruncHour", "TruncMinute", "TruncMonth", "TruncQuarter", "TruncSecond", "TruncTime", "TruncWeek", "TruncYear", # math "Abs", "ACos", "ASin", "ATan", "ATan2", "Ceil", "Cos", "Cot", "Degrees", "Exp", "Floor", "Ln", "Log", "Mod", "Pi", "Power", "Radians", "Random", "Round", "Sign", "Sin", "Sqrt", "Tan", # text "MD5", "SHA1", "SHA224", "SHA256", "SHA384", "SHA512", "Chr", "Concat", "ConcatPair", "Left", "Length", "Lower", "LPad", "LTrim", "Ord", "Repeat", "Replace", "Reverse", "Right", "RPad", "RTrim", "StrIndex", "Substr", "Trim", "Upper", # window "CumeDist", "DenseRank", "FirstValue", "Lag", "LastValue", "Lead", "NthValue", "Ntile", "PercentRank", "Rank", "RowNumber", ]
castiel248/Convert
Lib/site-packages/django/db/models/functions/__init__.py
Python
mit
2,658
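All of these names are re-exported so user code can import from the single django.db.models.functions namespace rather than the per-category submodules. An illustrative queryset, with a hypothetical Person model living in an installed app:

from django.db import models
from django.db.models import Value
from django.db.models.functions import Coalesce, Concat, Lower, Now


class Person(models.Model):  # hypothetical
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    nickname = models.CharField(max_length=50, null=True)

people = Person.objects.annotate(
    display=Lower(Concat("first_name", Value(" "), "last_name")),
    nick=Coalesce("nickname", Value("(none)")),
    seen=Now(),
)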
"""Database functions that do comparisons or type conversions.""" from django.db import NotSupportedError from django.db.models.expressions import Func, Value from django.db.models.fields import TextField from django.db.models.fields.json import JSONField from django.utils.regex_helper import _lazy_re_compile class Cast(Func): """Coerce an expression to a new field type.""" function = "CAST" template = "%(function)s(%(expressions)s AS %(db_type)s)" def __init__(self, expression, output_field): super().__init__(expression, output_field=output_field) def as_sql(self, compiler, connection, **extra_context): extra_context["db_type"] = self.output_field.cast_db_type(connection) return super().as_sql(compiler, connection, **extra_context) def as_sqlite(self, compiler, connection, **extra_context): db_type = self.output_field.db_type(connection) if db_type in {"datetime", "time"}: # Use strftime as datetime/time don't keep fractional seconds. template = "strftime(%%s, %(expressions)s)" sql, params = super().as_sql( compiler, connection, template=template, **extra_context ) format_string = "%H:%M:%f" if db_type == "time" else "%Y-%m-%d %H:%M:%f" params.insert(0, format_string) return sql, params elif db_type == "date": template = "date(%(expressions)s)" return super().as_sql( compiler, connection, template=template, **extra_context ) return self.as_sql(compiler, connection, **extra_context) def as_mysql(self, compiler, connection, **extra_context): template = None output_type = self.output_field.get_internal_type() # MySQL doesn't support explicit cast to float. if output_type == "FloatField": template = "(%(expressions)s + 0.0)" # MariaDB doesn't support explicit cast to JSON. elif output_type == "JSONField" and connection.mysql_is_mariadb: template = "JSON_EXTRACT(%(expressions)s, '$')" return self.as_sql(compiler, connection, template=template, **extra_context) def as_postgresql(self, compiler, connection, **extra_context): # CAST would be valid too, but the :: shortcut syntax is more readable. # 'expressions' is wrapped in parentheses in case it's a complex # expression. return self.as_sql( compiler, connection, template="(%(expressions)s)::%(db_type)s", **extra_context, ) def as_oracle(self, compiler, connection, **extra_context): if self.output_field.get_internal_type() == "JSONField": # Oracle doesn't support explicit cast to JSON. template = "JSON_QUERY(%(expressions)s, '$')" return super().as_sql( compiler, connection, template=template, **extra_context ) return self.as_sql(compiler, connection, **extra_context) class Coalesce(Func): """Return, from left to right, the first non-null expression.""" function = "COALESCE" def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError("Coalesce must take at least two expressions") super().__init__(*expressions, **extra) @property def empty_result_set_value(self): for expression in self.get_source_expressions(): result = expression.empty_result_set_value if result is NotImplemented or result is not None: return result return None def as_oracle(self, compiler, connection, **extra_context): # Oracle prohibits mixing TextField (NCLOB) and CharField (NVARCHAR2), # so convert all fields to NCLOB when that type is expected. 
if self.output_field.get_internal_type() == "TextField": clone = self.copy() clone.set_source_expressions( [ Func(expression, function="TO_NCLOB") for expression in self.get_source_expressions() ] ) return super(Coalesce, clone).as_sql(compiler, connection, **extra_context) return self.as_sql(compiler, connection, **extra_context) class Collate(Func): function = "COLLATE" template = "%(expressions)s %(function)s %(collation)s" # Inspired from # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS collation_re = _lazy_re_compile(r"^[\w\-]+$") def __init__(self, expression, collation): if not (collation and self.collation_re.match(collation)): raise ValueError("Invalid collation name: %r." % collation) self.collation = collation super().__init__(expression) def as_sql(self, compiler, connection, **extra_context): extra_context.setdefault("collation", connection.ops.quote_name(self.collation)) return super().as_sql(compiler, connection, **extra_context) class Greatest(Func): """ Return the maximum expression. If any expression is null the return value is database-specific: On PostgreSQL, the maximum not-null expression is returned. On MySQL, Oracle, and SQLite, if any expression is null, null is returned. """ function = "GREATEST" def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError("Greatest must take at least two expressions") super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): """Use the MAX function on SQLite.""" return super().as_sqlite(compiler, connection, function="MAX", **extra_context) class JSONObject(Func): function = "JSON_OBJECT" output_field = JSONField() def __init__(self, **fields): expressions = [] for key, value in fields.items(): expressions.extend((Value(key), value)) super().__init__(*expressions) def as_sql(self, compiler, connection, **extra_context): if not connection.features.has_json_object_function: raise NotSupportedError( "JSONObject() is not supported on this database backend." ) return super().as_sql(compiler, connection, **extra_context) def as_postgresql(self, compiler, connection, **extra_context): copy = self.copy() copy.set_source_expressions( [ Cast(expression, TextField()) if index % 2 == 0 else expression for index, expression in enumerate(copy.get_source_expressions()) ] ) return super(JSONObject, copy).as_sql( compiler, connection, function="JSONB_BUILD_OBJECT", **extra_context, ) def as_oracle(self, compiler, connection, **extra_context): class ArgJoiner: def join(self, args): args = [" VALUE ".join(arg) for arg in zip(args[::2], args[1::2])] return ", ".join(args) return self.as_sql( compiler, connection, arg_joiner=ArgJoiner(), template="%(function)s(%(expressions)s RETURNING CLOB)", **extra_context, ) class Least(Func): """ Return the minimum expression. If any expression is null the return value is database-specific: On PostgreSQL, return the minimum not-null expression. On MySQL, Oracle, and SQLite, if any expression is null, return null. 
""" function = "LEAST" def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError("Least must take at least two expressions") super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): """Use the MIN function on SQLite.""" return super().as_sqlite(compiler, connection, function="MIN", **extra_context) class NullIf(Func): function = "NULLIF" arity = 2 def as_oracle(self, compiler, connection, **extra_context): expression1 = self.get_source_expressions()[0] if isinstance(expression1, Value) and expression1.value is None: raise ValueError("Oracle does not allow Value(None) for expression1.") return super().as_sql(compiler, connection, **extra_context)
castiel248/Convert
Lib/site-packages/django/db/models/functions/comparison.py
Python
mit
8,488
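A short usage sketch for the comparison/conversion functions above; the Product model is hypothetical:

from django.db import models
from django.db.models import F, FloatField
from django.db.models.functions import Cast, Coalesce, Greatest, NullIf


class Product(models.Model):  # hypothetical
    price = models.DecimalField(max_digits=8, decimal_places=2)
    discount = models.DecimalField(max_digits=8, decimal_places=2, null=True)

Product.objects.annotate(
    price_f=Cast("price", FloatField()),      # CAST(...) -- ::type on PostgreSQL
    effective=Coalesce("discount", "price"),  # first non-null, left to right
    delta=NullIf("discount", F("price")),     # NULL when discount == price
    ceiling=Greatest("price", "discount"),    # NULL handling is backend-specific,
)                                             # and MAX is used on SQLite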
from datetime import datetime from django.conf import settings from django.db.models.expressions import Func from django.db.models.fields import ( DateField, DateTimeField, DurationField, Field, IntegerField, TimeField, ) from django.db.models.lookups import ( Transform, YearExact, YearGt, YearGte, YearLt, YearLte, ) from django.utils import timezone class TimezoneMixin: tzinfo = None def get_tzname(self): # Timezone conversions must happen to the input datetime *before* # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the # database as 2016-01-01 01:00:00 +00:00. Any results should be # based on the input datetime not the stored datetime. tzname = None if settings.USE_TZ: if self.tzinfo is None: tzname = timezone.get_current_timezone_name() else: tzname = timezone._get_timezone_name(self.tzinfo) return tzname class Extract(TimezoneMixin, Transform): lookup_name = None output_field = IntegerField() def __init__(self, expression, lookup_name=None, tzinfo=None, **extra): if self.lookup_name is None: self.lookup_name = lookup_name if self.lookup_name is None: raise ValueError("lookup_name must be provided") self.tzinfo = tzinfo super().__init__(expression, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) lhs_output_field = self.lhs.output_field if isinstance(lhs_output_field, DateTimeField): tzname = self.get_tzname() sql, params = connection.ops.datetime_extract_sql( self.lookup_name, sql, tuple(params), tzname ) elif self.tzinfo is not None: raise ValueError("tzinfo can only be used with DateTimeField.") elif isinstance(lhs_output_field, DateField): sql, params = connection.ops.date_extract_sql( self.lookup_name, sql, tuple(params) ) elif isinstance(lhs_output_field, TimeField): sql, params = connection.ops.time_extract_sql( self.lookup_name, sql, tuple(params) ) elif isinstance(lhs_output_field, DurationField): if not connection.features.has_native_duration_field: raise ValueError( "Extract requires native DurationField database support." ) sql, params = connection.ops.time_extract_sql( self.lookup_name, sql, tuple(params) ) else: # resolve_expression has already validated the output_field so this # assert should never be hit. assert False, "Tried to Extract from an invalid type." return sql, params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): copy = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) field = getattr(copy.lhs, "output_field", None) if field is None: return copy if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)): raise ValueError( "Extract input expression must be DateField, DateTimeField, " "TimeField, or DurationField." ) # Passing dates to functions expecting datetimes is most likely a mistake. if type(field) == DateField and copy.lookup_name in ( "hour", "minute", "second", ): raise ValueError( "Cannot extract time component '%s' from DateField '%s'." % (copy.lookup_name, field.name) ) if isinstance(field, DurationField) and copy.lookup_name in ( "year", "iso_year", "month", "week", "week_day", "iso_week_day", "quarter", ): raise ValueError( "Cannot extract component '%s' from DurationField '%s'." 
% (copy.lookup_name, field.name) ) return copy class ExtractYear(Extract): lookup_name = "year" class ExtractIsoYear(Extract): """Return the ISO-8601 week-numbering year.""" lookup_name = "iso_year" class ExtractMonth(Extract): lookup_name = "month" class ExtractDay(Extract): lookup_name = "day" class ExtractWeek(Extract): """ Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the week. """ lookup_name = "week" class ExtractWeekDay(Extract): """ Return Sunday=1 through Saturday=7. To replicate this in Python: (mydatetime.isoweekday() % 7) + 1 """ lookup_name = "week_day" class ExtractIsoWeekDay(Extract): """Return Monday=1 through Sunday=7, based on ISO-8601.""" lookup_name = "iso_week_day" class ExtractQuarter(Extract): lookup_name = "quarter" class ExtractHour(Extract): lookup_name = "hour" class ExtractMinute(Extract): lookup_name = "minute" class ExtractSecond(Extract): lookup_name = "second" DateField.register_lookup(ExtractYear) DateField.register_lookup(ExtractMonth) DateField.register_lookup(ExtractDay) DateField.register_lookup(ExtractWeekDay) DateField.register_lookup(ExtractIsoWeekDay) DateField.register_lookup(ExtractWeek) DateField.register_lookup(ExtractIsoYear) DateField.register_lookup(ExtractQuarter) TimeField.register_lookup(ExtractHour) TimeField.register_lookup(ExtractMinute) TimeField.register_lookup(ExtractSecond) DateTimeField.register_lookup(ExtractHour) DateTimeField.register_lookup(ExtractMinute) DateTimeField.register_lookup(ExtractSecond) ExtractYear.register_lookup(YearExact) ExtractYear.register_lookup(YearGt) ExtractYear.register_lookup(YearGte) ExtractYear.register_lookup(YearLt) ExtractYear.register_lookup(YearLte) ExtractIsoYear.register_lookup(YearExact) ExtractIsoYear.register_lookup(YearGt) ExtractIsoYear.register_lookup(YearGte) ExtractIsoYear.register_lookup(YearLt) ExtractIsoYear.register_lookup(YearLte) class Now(Func): template = "CURRENT_TIMESTAMP" output_field = DateTimeField() def as_postgresql(self, compiler, connection, **extra_context): # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with # other databases. return self.as_sql( compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context ) def as_mysql(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, template="STRFTIME('%%%%Y-%%%%m-%%%%d %%%%H:%%%%M:%%%%f', 'NOW')", **extra_context, ) class TruncBase(TimezoneMixin, Transform): kind = None tzinfo = None # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. 
def __init__( self, expression, output_field=None, tzinfo=None, is_dst=timezone.NOT_PASSED, **extra, ): self.tzinfo = tzinfo self.is_dst = is_dst super().__init__(expression, output_field=output_field, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) tzname = None if isinstance(self.lhs.output_field, DateTimeField): tzname = self.get_tzname() elif self.tzinfo is not None: raise ValueError("tzinfo can only be used with DateTimeField.") if isinstance(self.output_field, DateTimeField): sql, params = connection.ops.datetime_trunc_sql( self.kind, sql, tuple(params), tzname ) elif isinstance(self.output_field, DateField): sql, params = connection.ops.date_trunc_sql( self.kind, sql, tuple(params), tzname ) elif isinstance(self.output_field, TimeField): sql, params = connection.ops.time_trunc_sql( self.kind, sql, tuple(params), tzname ) else: raise ValueError( "Trunc only valid on DateField, TimeField, or DateTimeField." ) return sql, params def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): copy = super().resolve_expression( query, allow_joins, reuse, summarize, for_save ) field = copy.lhs.output_field # DateTimeField is a subclass of DateField so this works for both. if not isinstance(field, (DateField, TimeField)): raise TypeError( "%r isn't a DateField, TimeField, or DateTimeField." % field.name ) # If self.output_field was None, then accessing the field will trigger # the resolver to assign it to self.lhs.output_field. if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)): raise ValueError( "output_field must be either DateField, TimeField, or DateTimeField" ) # Passing dates or times to functions expecting datetimes is most # likely a mistake. class_output_field = ( self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None ) output_field = class_output_field or copy.output_field has_explicit_output_field = ( class_output_field or field.__class__ is not copy.output_field.__class__ ) if type(field) == DateField and ( isinstance(output_field, DateTimeField) or copy.kind in ("hour", "minute", "second", "time") ): raise ValueError( "Cannot truncate DateField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else "DateTimeField", ) ) elif isinstance(field, TimeField) and ( isinstance(output_field, DateTimeField) or copy.kind in ("year", "quarter", "month", "week", "day", "date") ): raise ValueError( "Cannot truncate TimeField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else "DateTimeField", ) ) return copy def convert_value(self, value, expression, connection): if isinstance(self.output_field, DateTimeField): if not settings.USE_TZ: pass elif value is not None: value = value.replace(tzinfo=None) value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst) elif not connection.features.has_zoneinfo_database: raise ValueError( "Database returned an invalid datetime value. Are time " "zone definitions for your database installed?" ) elif isinstance(value, datetime): if value is None: pass elif isinstance(self.output_field, DateField): value = value.date() elif isinstance(self.output_field, TimeField): value = value.time() return value class Trunc(TruncBase): # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. 
def __init__( self, expression, kind, output_field=None, tzinfo=None, is_dst=timezone.NOT_PASSED, **extra, ): self.kind = kind super().__init__( expression, output_field=output_field, tzinfo=tzinfo, is_dst=is_dst, **extra ) class TruncYear(TruncBase): kind = "year" class TruncQuarter(TruncBase): kind = "quarter" class TruncMonth(TruncBase): kind = "month" class TruncWeek(TruncBase): """Truncate to midnight on the Monday of the week.""" kind = "week" class TruncDay(TruncBase): kind = "day" class TruncDate(TruncBase): kind = "date" lookup_name = "date" output_field = DateField() def as_sql(self, compiler, connection): # Cast to date rather than truncate to date. sql, params = compiler.compile(self.lhs) tzname = self.get_tzname() return connection.ops.datetime_cast_date_sql(sql, tuple(params), tzname) class TruncTime(TruncBase): kind = "time" lookup_name = "time" output_field = TimeField() def as_sql(self, compiler, connection): # Cast to time rather than truncate to time. sql, params = compiler.compile(self.lhs) tzname = self.get_tzname() return connection.ops.datetime_cast_time_sql(sql, tuple(params), tzname) class TruncHour(TruncBase): kind = "hour" class TruncMinute(TruncBase): kind = "minute" class TruncSecond(TruncBase): kind = "second" DateTimeField.register_lookup(TruncDate) DateTimeField.register_lookup(TruncTime)
castiel248/Convert
Lib/site-packages/django/db/models/functions/datetime.py
Python
mit
13,658
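Typical use of the extract/trunc machinery above is grouping by a calendar unit. A sketch with a hypothetical Event model; with USE_TZ enabled the conversion happens in the current time zone, per TimezoneMixin:

from django.db import models
from django.db.models import Count
from django.db.models.functions import ExtractYear, TruncMonth


class Event(models.Model):  # hypothetical
    start = models.DateTimeField()

# Extract subclasses are registered as lookups above, so the transform
# form works directly in filters.
Event.objects.filter(start__year=2023)

# Function form in annotations.
Event.objects.annotate(year=ExtractYear("start"))

# TruncMonth keeps a DateTimeField output by default; group on it.
(
    Event.objects.annotate(month=TruncMonth("start"))
    .values("month")
    .annotate(n=Count("id"))
    .order_by("month")
)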
import math from django.db.models.expressions import Func, Value from django.db.models.fields import FloatField, IntegerField from django.db.models.functions import Cast from django.db.models.functions.mixins import ( FixDecimalInputMixin, NumericOutputFieldMixin, ) from django.db.models.lookups import Transform class Abs(Transform): function = "ABS" lookup_name = "abs" class ACos(NumericOutputFieldMixin, Transform): function = "ACOS" lookup_name = "acos" class ASin(NumericOutputFieldMixin, Transform): function = "ASIN" lookup_name = "asin" class ATan(NumericOutputFieldMixin, Transform): function = "ATAN" lookup_name = "atan" class ATan2(NumericOutputFieldMixin, Func): function = "ATAN2" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr( connection.ops, "spatialite", False ) or connection.ops.spatial_version >= (5, 0, 0): return self.as_sql(compiler, connection) # This function is usually ATan2(y, x), returning the inverse tangent # of y / x, but it's ATan2(x, y) on SpatiaLite < 5.0.0. # Cast integers to float to avoid inconsistent/buggy behavior if the # arguments are mixed between integer and float or decimal. # https://www.gaia-gis.it/fossil/libspatialite/tktview?name=0f72cca3a2 clone = self.copy() clone.set_source_expressions( [ Cast(expression, FloatField()) if isinstance(expression.output_field, IntegerField) else expression for expression in self.get_source_expressions()[::-1] ] ) return clone.as_sql(compiler, connection, **extra_context) class Ceil(Transform): function = "CEILING" lookup_name = "ceil" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="CEIL", **extra_context) class Cos(NumericOutputFieldMixin, Transform): function = "COS" lookup_name = "cos" class Cot(NumericOutputFieldMixin, Transform): function = "COT" lookup_name = "cot" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="(1 / TAN(%(expressions)s))", **extra_context ) class Degrees(NumericOutputFieldMixin, Transform): function = "DEGREES" lookup_name = "degrees" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * 180 / %s)" % math.pi, **extra_context, ) class Exp(NumericOutputFieldMixin, Transform): function = "EXP" lookup_name = "exp" class Floor(Transform): function = "FLOOR" lookup_name = "floor" class Ln(NumericOutputFieldMixin, Transform): function = "LN" lookup_name = "ln" class Log(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "LOG" arity = 2 def as_sqlite(self, compiler, connection, **extra_context): if not getattr(connection.ops, "spatialite", False): return self.as_sql(compiler, connection) # This function is usually Log(b, x) returning the logarithm of x to # the base b, but on SpatiaLite it's Log(x, b). 
clone = self.copy() clone.set_source_expressions(self.get_source_expressions()[::-1]) return clone.as_sql(compiler, connection, **extra_context) class Mod(FixDecimalInputMixin, NumericOutputFieldMixin, Func): function = "MOD" arity = 2 class Pi(NumericOutputFieldMixin, Func): function = "PI" arity = 0 def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template=str(math.pi), **extra_context ) class Power(NumericOutputFieldMixin, Func): function = "POWER" arity = 2 class Radians(NumericOutputFieldMixin, Transform): function = "RADIANS" lookup_name = "radians" def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="((%%(expressions)s) * %s / 180)" % math.pi, **extra_context, ) class Random(NumericOutputFieldMixin, Func): function = "RANDOM" arity = 0 def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="DBMS_RANDOM.VALUE", **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="RAND", **extra_context) def get_group_by_cols(self): return [] class Round(FixDecimalInputMixin, Transform): function = "ROUND" lookup_name = "round" arity = None # Override Transform's arity=1 to enable passing precision. def __init__(self, expression, precision=0, **extra): super().__init__(expression, precision, **extra) def as_sqlite(self, compiler, connection, **extra_context): precision = self.get_source_expressions()[1] if isinstance(precision, Value) and precision.value < 0: raise ValueError("SQLite does not support negative precision.") return super().as_sqlite(compiler, connection, **extra_context) def _resolve_output_field(self): source = self.get_source_expressions()[0] return source.output_field class Sign(Transform): function = "SIGN" lookup_name = "sign" class Sin(NumericOutputFieldMixin, Transform): function = "SIN" lookup_name = "sin" class Sqrt(NumericOutputFieldMixin, Transform): function = "SQRT" lookup_name = "sqrt" class Tan(NumericOutputFieldMixin, Transform): function = "TAN" lookup_name = "tan"
castiel248/Convert
Lib/site-packages/django/db/models/functions/math.py
Python
mit
6,092
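These map almost one-to-one onto SQL math functions; the backend overrides above handle the exceptions (CEIL on Oracle, RAND on MySQL/SQLite, argument order on SpatiaLite). A brief sketch with a hypothetical Measurement model:

from django.db import models
from django.db.models import FloatField
from django.db.models.functions import Abs, Power, Round, Sqrt


class Measurement(models.Model):  # hypothetical
    value = models.FloatField()

Measurement.objects.annotate(
    magnitude=Abs("value"),
    rounded=Round("value", precision=2),  # a negative literal precision is
    square=Power("value", 2),             # rejected on SQLite, see above
    root=Sqrt(Abs("value")),
)

# Transforms are not registered on fields by default; opt in per field
# class, after which the lookup form becomes available.
FloatField.register_lookup(Abs)
Measurement.objects.filter(value__abs__lt=0.5)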
import sys from django.db.models.fields import DecimalField, FloatField, IntegerField from django.db.models.functions import Cast class FixDecimalInputMixin: def as_postgresql(self, compiler, connection, **extra_context): # Cast FloatField to DecimalField as PostgreSQL doesn't support the # following function signatures: # - LOG(double, double) # - MOD(double, double) output_field = DecimalField(decimal_places=sys.float_info.dig, max_digits=1000) clone = self.copy() clone.set_source_expressions( [ Cast(expression, output_field) if isinstance(expression.output_field, FloatField) else expression for expression in self.get_source_expressions() ] ) return clone.as_sql(compiler, connection, **extra_context) class FixDurationInputMixin: def as_mysql(self, compiler, connection, **extra_context): sql, params = super().as_sql(compiler, connection, **extra_context) if self.output_field.get_internal_type() == "DurationField": sql = "CAST(%s AS SIGNED)" % sql return sql, params def as_oracle(self, compiler, connection, **extra_context): if self.output_field.get_internal_type() == "DurationField": expression = self.get_source_expressions()[0] options = self._get_repr_options() from django.db.backends.oracle.functions import ( IntervalToSeconds, SecondsToInterval, ) return compiler.compile( SecondsToInterval( self.__class__(IntervalToSeconds(expression), **options) ) ) return super().as_sql(compiler, connection, **extra_context) class NumericOutputFieldMixin: def _resolve_output_field(self): source_fields = self.get_source_fields() if any(isinstance(s, DecimalField) for s in source_fields): return DecimalField() if any(isinstance(s, IntegerField) for s in source_fields): return FloatField() return super()._resolve_output_field() if source_fields else FloatField()
castiel248/Convert
Lib/site-packages/django/db/models/functions/mixins.py
Python
mit
2,229
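NumericOutputFieldMixin can also back user-defined functions: per _resolve_output_field() above, a DecimalField source wins, an IntegerField source yields FloatField, and otherwise the output resolves from the sources. A sketch of a custom Func using it; note that django.db.models.functions.mixins is an internal module rather than documented public API, and CBRT is a PostgreSQL function, not part of Django -- both are assumptions here:

from django.db.models import Func
from django.db.models.functions.mixins import NumericOutputFieldMixin


class Cbrt(NumericOutputFieldMixin, Func):
    """Cube root via PostgreSQL's CBRT() -- hypothetical example."""
    function = "CBRT"
    arity = 1

# Measurement.objects.annotate(r=Cbrt("value")) would resolve to a
# FloatField output for float/integer input, or DecimalField if any
# source expression is a DecimalField.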
from django.db import NotSupportedError from django.db.models.expressions import Func, Value from django.db.models.fields import CharField, IntegerField, TextField from django.db.models.functions import Cast, Coalesce from django.db.models.lookups import Transform class MySQLSHA2Mixin: def as_mysql(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="SHA2(%%(expressions)s, %s)" % self.function[3:], **extra_context, ) class OracleHashMixin: def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template=( "LOWER(RAWTOHEX(STANDARD_HASH(UTL_I18N.STRING_TO_RAW(" "%(expressions)s, 'AL32UTF8'), '%(function)s')))" ), **extra_context, ) class PostgreSQLSHAMixin: def as_postgresql(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="ENCODE(DIGEST(%(expressions)s, '%(function)s'), 'hex')", function=self.function.lower(), **extra_context, ) class Chr(Transform): function = "CHR" lookup_name = "chr" def as_mysql(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="CHAR", template="%(function)s(%(expressions)s USING utf16)", **extra_context, ) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template="%(function)s(%(expressions)s USING NCHAR_CS)", **extra_context, ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="CHAR", **extra_context) class ConcatPair(Func): """ Concatenate two arguments together. This is used by `Concat` because not all backend databases support more than two arguments. """ function = "CONCAT" def as_sqlite(self, compiler, connection, **extra_context): coalesced = self.coalesce() return super(ConcatPair, coalesced).as_sql( compiler, connection, template="%(expressions)s", arg_joiner=" || ", **extra_context, ) def as_postgresql(self, compiler, connection, **extra_context): copy = self.copy() copy.set_source_expressions( [ Cast(expression, TextField()) for expression in copy.get_source_expressions() ] ) return super(ConcatPair, copy).as_sql( compiler, connection, **extra_context, ) def as_mysql(self, compiler, connection, **extra_context): # Use CONCAT_WS with an empty separator so that NULLs are ignored. return super().as_sql( compiler, connection, function="CONCAT_WS", template="%(function)s('', %(expressions)s)", **extra_context, ) def coalesce(self): # null on either side results in null for expression, wrap with coalesce c = self.copy() c.set_source_expressions( [ Coalesce(expression, Value("")) for expression in c.get_source_expressions() ] ) return c class Concat(Func): """ Concatenate text fields together. Backends that result in an entire null expression when any arguments are null will wrap each argument in coalesce functions to ensure a non-null result. 
""" function = None template = "%(expressions)s" def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError("Concat must take at least two expressions") paired = self._paired(expressions) super().__init__(paired, **extra) def _paired(self, expressions): # wrap pairs of expressions in successive concat functions # exp = [a, b, c, d] # -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d)))) if len(expressions) == 2: return ConcatPair(*expressions) return ConcatPair(expressions[0], self._paired(expressions[1:])) class Left(Func): function = "LEFT" arity = 2 output_field = CharField() def __init__(self, expression, length, **extra): """ expression: the name of a field, or an expression returning a string length: the number of characters to return from the start of the string """ if not hasattr(length, "resolve_expression"): if length < 1: raise ValueError("'length' must be greater than 0.") super().__init__(expression, length, **extra) def get_substr(self): return Substr(self.source_expressions[0], Value(1), self.source_expressions[1]) def as_oracle(self, compiler, connection, **extra_context): return self.get_substr().as_oracle(compiler, connection, **extra_context) def as_sqlite(self, compiler, connection, **extra_context): return self.get_substr().as_sqlite(compiler, connection, **extra_context) class Length(Transform): """Return the number of characters in the expression.""" function = "LENGTH" lookup_name = "length" output_field = IntegerField() def as_mysql(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function="CHAR_LENGTH", **extra_context ) class Lower(Transform): function = "LOWER" lookup_name = "lower" class LPad(Func): function = "LPAD" output_field = CharField() def __init__(self, expression, length, fill_text=Value(" "), **extra): if ( not hasattr(length, "resolve_expression") and length is not None and length < 0 ): raise ValueError("'length' must be greater or equal to 0.") super().__init__(expression, length, fill_text, **extra) class LTrim(Transform): function = "LTRIM" lookup_name = "ltrim" class MD5(OracleHashMixin, Transform): function = "MD5" lookup_name = "md5" class Ord(Transform): function = "ASCII" lookup_name = "ord" output_field = IntegerField() def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="ORD", **extra_context) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="UNICODE", **extra_context) class Repeat(Func): function = "REPEAT" output_field = CharField() def __init__(self, expression, number, **extra): if ( not hasattr(number, "resolve_expression") and number is not None and number < 0 ): raise ValueError("'number' must be greater or equal to 0.") super().__init__(expression, number, **extra) def as_oracle(self, compiler, connection, **extra_context): expression, number = self.source_expressions length = None if number is None else Length(expression) * number rpad = RPad(expression, length, expression) return rpad.as_sql(compiler, connection, **extra_context) class Replace(Func): function = "REPLACE" def __init__(self, expression, text, replacement=Value(""), **extra): super().__init__(expression, text, replacement, **extra) class Reverse(Transform): function = "REVERSE" lookup_name = "reverse" def as_oracle(self, compiler, connection, **extra_context): # REVERSE in Oracle is undocumented and doesn't support multi-byte # strings. Use a special subquery instead. 
return super().as_sql( compiler, connection, template=( "(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM " "(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s " "FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) " "GROUP BY %(expressions)s)" ), **extra_context, ) class Right(Left): function = "RIGHT" def get_substr(self): return Substr( self.source_expressions[0], self.source_expressions[1] * Value(-1) ) class RPad(LPad): function = "RPAD" class RTrim(Transform): function = "RTRIM" lookup_name = "rtrim" class SHA1(OracleHashMixin, PostgreSQLSHAMixin, Transform): function = "SHA1" lookup_name = "sha1" class SHA224(MySQLSHA2Mixin, PostgreSQLSHAMixin, Transform): function = "SHA224" lookup_name = "sha224" def as_oracle(self, compiler, connection, **extra_context): raise NotSupportedError("SHA224 is not supported on Oracle.") class SHA256(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = "SHA256" lookup_name = "sha256" class SHA384(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = "SHA384" lookup_name = "sha384" class SHA512(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = "SHA512" lookup_name = "sha512" class StrIndex(Func): """ Return a positive integer corresponding to the 1-indexed position of the first occurrence of a substring inside another string, or 0 if the substring is not found. """ function = "INSTR" arity = 2 output_field = IntegerField() def as_postgresql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="STRPOS", **extra_context) class Substr(Func): function = "SUBSTRING" output_field = CharField() def __init__(self, expression, pos, length=None, **extra): """ expression: the name of a field, or an expression returning a string pos: an integer > 0, or an expression returning an integer length: an optional number of characters to return """ if not hasattr(pos, "resolve_expression"): if pos < 1: raise ValueError("'pos' must be greater than 0") expressions = [expression, pos] if length is not None: expressions.append(length) super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="SUBSTR", **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function="SUBSTR", **extra_context) class Trim(Transform): function = "TRIM" lookup_name = "trim" class Upper(Transform): function = "UPPER" lookup_name = "upper"
castiel248/Convert
Lib/site-packages/django/db/models/functions/text.py
Python
mit
11,035
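A usage sketch for the text functions, reusing the hypothetical Person model from the functions sketch above. Note the ConcatPair coalescing: on backends where one NULL argument would null out the whole expression, each argument is wrapped in Coalesce so Concat still returns a string:

from django.db.models import Value
from django.db.models.functions import Concat, Length, LPad, Substr, Upper

Person.objects.annotate(
    # Concat nests its arguments into successive ConcatPair calls.
    full=Concat("last_name", Value(", "), "first_name"),
    # SUBSTRING(first_name, 1, 1) -- rewritten to SUBSTR on SQLite/Oracle.
    initial=Upper(Substr("first_name", 1, 1)),
    # LPAD with the default fill text " "; length must be >= 0.
    padded=LPad("last_name", 12),
    name_len=Length("first_name"),
)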
from django.db.models.expressions import Func from django.db.models.fields import FloatField, IntegerField __all__ = [ "CumeDist", "DenseRank", "FirstValue", "Lag", "LastValue", "Lead", "NthValue", "Ntile", "PercentRank", "Rank", "RowNumber", ] class CumeDist(Func): function = "CUME_DIST" output_field = FloatField() window_compatible = True class DenseRank(Func): function = "DENSE_RANK" output_field = IntegerField() window_compatible = True class FirstValue(Func): arity = 1 function = "FIRST_VALUE" window_compatible = True class LagLeadFunction(Func): window_compatible = True def __init__(self, expression, offset=1, default=None, **extra): if expression is None: raise ValueError( "%s requires a non-null source expression." % self.__class__.__name__ ) if offset is None or offset <= 0: raise ValueError( "%s requires a positive integer for the offset." % self.__class__.__name__ ) args = (expression, offset) if default is not None: args += (default,) super().__init__(*args, **extra) def _resolve_output_field(self): sources = self.get_source_expressions() return sources[0].output_field class Lag(LagLeadFunction): function = "LAG" class LastValue(Func): arity = 1 function = "LAST_VALUE" window_compatible = True class Lead(LagLeadFunction): function = "LEAD" class NthValue(Func): function = "NTH_VALUE" window_compatible = True def __init__(self, expression, nth=1, **extra): if expression is None: raise ValueError( "%s requires a non-null source expression." % self.__class__.__name__ ) if nth is None or nth <= 0: raise ValueError( "%s requires a positive integer as for nth." % self.__class__.__name__ ) super().__init__(expression, nth, **extra) def _resolve_output_field(self): sources = self.get_source_expressions() return sources[0].output_field class Ntile(Func): function = "NTILE" output_field = IntegerField() window_compatible = True def __init__(self, num_buckets=1, **extra): if num_buckets <= 0: raise ValueError("num_buckets must be greater than 0.") super().__init__(num_buckets, **extra) class PercentRank(Func): function = "PERCENT_RANK" output_field = FloatField() window_compatible = True class Rank(Func): function = "RANK" output_field = IntegerField() window_compatible = True class RowNumber(Func): function = "ROW_NUMBER" output_field = IntegerField() window_compatible = True
castiel248/Convert
Lib/site-packages/django/db/models/functions/window.py
Python
mit
2,841
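All of these are only valid inside a Window expression (window_compatible=True). A sketch of ranking rows per partition, with a hypothetical Employee model:

from django.db import models
from django.db.models import F, Window
from django.db.models.functions import Lag, Rank


class Employee(models.Model):  # hypothetical
    department = models.CharField(max_length=50)
    salary = models.IntegerField()
    hired = models.DateField()

Employee.objects.annotate(
    # RANK() OVER (PARTITION BY department ORDER BY salary DESC)
    pay_rank=Window(
        expression=Rank(),
        partition_by=[F("department")],
        order_by=F("salary").desc(),
    ),
    # LAG(salary, 1): LagLeadFunction validates offset > 0 and takes its
    # output field from the source expression.
    prev_salary=Window(
        expression=Lag("salary"),
        partition_by=[F("department")],
        order_by=F("hired").asc(),
    ),
)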
from django.db.backends.utils import names_digest, split_identifier from django.db.models.expressions import Col, ExpressionList, F, Func, OrderBy from django.db.models.functions import Collate from django.db.models.query_utils import Q from django.db.models.sql import Query from django.utils.functional import partition __all__ = ["Index"] class Index: suffix = "idx" # The max length of the name of the index (restricted to 30 for # cross-database compatibility with Oracle) max_name_length = 30 def __init__( self, *expressions, fields=(), name=None, db_tablespace=None, opclasses=(), condition=None, include=None, ): if opclasses and not name: raise ValueError("An index must be named to use opclasses.") if not isinstance(condition, (type(None), Q)): raise ValueError("Index.condition must be a Q instance.") if condition and not name: raise ValueError("An index must be named to use condition.") if not isinstance(fields, (list, tuple)): raise ValueError("Index.fields must be a list or tuple.") if not isinstance(opclasses, (list, tuple)): raise ValueError("Index.opclasses must be a list or tuple.") if not expressions and not fields: raise ValueError( "At least one field or expression is required to define an index." ) if expressions and fields: raise ValueError( "Index.fields and expressions are mutually exclusive.", ) if expressions and not name: raise ValueError("An index must be named to use expressions.") if expressions and opclasses: raise ValueError( "Index.opclasses cannot be used with expressions. Use " "django.contrib.postgres.indexes.OpClass() instead." ) if opclasses and len(fields) != len(opclasses): raise ValueError( "Index.fields and Index.opclasses must have the same number of " "elements." ) if fields and not all(isinstance(field, str) for field in fields): raise ValueError("Index.fields must contain only strings with field names.") if include and not name: raise ValueError("A covering index must be named.") if not isinstance(include, (type(None), list, tuple)): raise ValueError("Index.include must be a list or tuple.") self.fields = list(fields) # A list of 2-tuple with the field name and ordering ('' or 'DESC'). 
self.fields_orders = [ (field_name[1:], "DESC") if field_name.startswith("-") else (field_name, "") for field_name in self.fields ] self.name = name or "" self.db_tablespace = db_tablespace self.opclasses = opclasses self.condition = condition self.include = tuple(include) if include else () self.expressions = tuple( F(expression) if isinstance(expression, str) else expression for expression in expressions ) @property def contains_expressions(self): return bool(self.expressions) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def create_sql(self, model, schema_editor, using="", **kwargs): include = [ model._meta.get_field(field_name).column for field_name in self.include ] condition = self._get_condition_sql(model, schema_editor) if self.expressions: index_expressions = [] for expression in self.expressions: index_expression = IndexExpression(expression) index_expression.set_wrapper_classes(schema_editor.connection) index_expressions.append(index_expression) expressions = ExpressionList(*index_expressions).resolve_expression( Query(model, alias_cols=False), ) fields = None col_suffixes = None else: fields = [ model._meta.get_field(field_name) for field_name, _ in self.fields_orders ] if schema_editor.connection.features.supports_index_column_ordering: col_suffixes = [order[1] for order in self.fields_orders] else: col_suffixes = [""] * len(self.fields_orders) expressions = None return schema_editor._create_index_sql( model, fields=fields, name=self.name, using=using, db_tablespace=self.db_tablespace, col_suffixes=col_suffixes, opclasses=self.opclasses, condition=condition, include=include, expressions=expressions, **kwargs, ) def remove_sql(self, model, schema_editor, **kwargs): return schema_editor._delete_index_sql(model, self.name, **kwargs) def deconstruct(self): path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) path = path.replace("django.db.models.indexes", "django.db.models") kwargs = {"name": self.name} if self.fields: kwargs["fields"] = self.fields if self.db_tablespace is not None: kwargs["db_tablespace"] = self.db_tablespace if self.opclasses: kwargs["opclasses"] = self.opclasses if self.condition: kwargs["condition"] = self.condition if self.include: kwargs["include"] = self.include return (path, self.expressions, kwargs) def clone(self): """Create a copy of this Index.""" _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def set_name_with_model(self, model): """ Generate a unique name for the index. The name is divided into 3 parts - table name (12 chars), field name (8 chars) and unique hash + suffix (10 chars). Each part is made to fit its size by truncating the excess length. """ _, table_name = split_identifier(model._meta.db_table) column_names = [ model._meta.get_field(field_name).column for field_name, order in self.fields_orders ] column_names_with_order = [ (("-%s" if order else "%s") % column_name) for column_name, (field_name, order) in zip( column_names, self.fields_orders ) ] # The length of the parts of the name is based on the default max # length of 30 characters. 
        hash_data = [table_name] + column_names_with_order + [self.suffix]
        self.name = "%s_%s_%s" % (
            table_name[:11],
            column_names[0][:7],
            "%s_%s" % (names_digest(*hash_data, length=6), self.suffix),
        )
        if len(self.name) > self.max_name_length:
            raise ValueError(
                "Index too long for multiple database support. Is self.suffix "
                "longer than 3 characters?"
            )
        if self.name[0] == "_" or self.name[0].isdigit():
            self.name = "D%s" % self.name[1:]

    def __repr__(self):
        return "<%s:%s%s%s%s%s%s%s>" % (
            self.__class__.__qualname__,
            "" if not self.fields else " fields=%s" % repr(self.fields),
            "" if not self.expressions else " expressions=%s" % repr(self.expressions),
            "" if not self.name else " name=%s" % repr(self.name),
            ""
            if self.db_tablespace is None
            else " db_tablespace=%s" % repr(self.db_tablespace),
            "" if self.condition is None else " condition=%s" % self.condition,
            "" if not self.include else " include=%s" % repr(self.include),
            "" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
        )

    def __eq__(self, other):
        if self.__class__ == other.__class__:
            return self.deconstruct() == other.deconstruct()
        return NotImplemented


class IndexExpression(Func):
    """Order and wrap expressions for CREATE INDEX statements."""

    template = "%(expressions)s"
    wrapper_classes = (OrderBy, Collate)

    def set_wrapper_classes(self, connection=None):
        # Some databases (e.g. MySQL) treat COLLATE as an indexed expression.
        if connection and connection.features.collate_as_index_expression:
            self.wrapper_classes = tuple(
                [
                    wrapper_cls
                    for wrapper_cls in self.wrapper_classes
                    if wrapper_cls is not Collate
                ]
            )

    @classmethod
    def register_wrappers(cls, *wrapper_classes):
        cls.wrapper_classes = wrapper_classes

    def resolve_expression(
        self,
        query=None,
        allow_joins=True,
        reuse=None,
        summarize=False,
        for_save=False,
    ):
        expressions = list(self.flatten())
        # Split expressions and wrappers.
        index_expressions, wrappers = partition(
            lambda e: isinstance(e, self.wrapper_classes),
            expressions,
        )
        wrapper_types = [type(wrapper) for wrapper in wrappers]
        if len(wrapper_types) != len(set(wrapper_types)):
            raise ValueError(
                "Multiple references to %s can't be used in an indexed "
                "expression."
                % ", ".join(
                    [wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes]
                )
            )
        if expressions[1 : len(wrappers) + 1] != wrappers:
            raise ValueError(
                "%s must be topmost expressions in an indexed expression."
                % ", ".join(
                    [wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes]
                )
            )
        # Wrap expressions in parentheses if they are not column references.
        root_expression = index_expressions[1]
        resolve_root_expression = root_expression.resolve_expression(
            query,
            allow_joins,
            reuse,
            summarize,
            for_save,
        )
        if not isinstance(resolve_root_expression, Col):
            root_expression = Func(root_expression, template="(%(expressions)s)")

        if wrappers:
            # Order wrappers and set their expressions.
            wrappers = sorted(
                wrappers,
                key=lambda w: self.wrapper_classes.index(type(w)),
            )
            wrappers = [wrapper.copy() for wrapper in wrappers]
            for i, wrapper in enumerate(wrappers[:-1]):
                wrapper.set_source_expressions([wrappers[i + 1]])
            # Set the root expression on the deepest wrapper.
            wrappers[-1].set_source_expressions([root_expression])
            self.set_source_expressions([wrappers[0]])
        else:
            # Use the root expression, if there are no wrappers.
            self.set_source_expressions([root_expression])
        return super().resolve_expression(
            query, allow_joins, reuse, summarize, for_save
        )

    def as_sqlite(self, compiler, connection, **extra_context):
        # Casting to numeric is unnecessary.
        return self.as_sql(compiler, connection, **extra_context)
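
# Illustrative usage sketch (not part of this module): how the Index class
# above is typically consumed from a model's Meta.indexes in an application's
# models.py. The Ticket model and its fields are hypothetical; the naming
# requirements mirror the validation in Index.__init__() (condition and
# expressions both demand an explicit name).

from django.db import models
from django.db.models import Q
from django.db.models.functions import Lower


class Ticket(models.Model):
    title = models.CharField(max_length=200)
    closed = models.BooleanField(default=False)

    class Meta:
        app_label = "demo"  # hypothetical app
        indexes = [
            # Unnamed index: set_name_with_model() generates a name when the
            # migration is created.
            models.Index(fields=["title"]),
            # A "-" prefix is parsed into fields_orders as a DESC ordering.
            models.Index(fields=["-closed", "title"], name="demo_closed_title_idx"),
            # Partial index: a condition requires an explicit name.
            models.Index(
                fields=["title"], condition=Q(closed=False), name="demo_open_idx"
            ),
            # Expression index: wrapped in IndexExpression by create_sql().
            models.Index(Lower("title"), name="demo_lower_title_idx"),
        ]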
castiel248/Convert
Lib/site-packages/django/db/models/indexes.py
Python
mit
11,871
import itertools import math from django.core.exceptions import EmptyResultSet from django.db.models.expressions import Case, Expression, Func, Value, When from django.db.models.fields import ( BooleanField, CharField, DateTimeField, Field, IntegerField, UUIDField, ) from django.db.models.query_utils import RegisterLookupMixin from django.utils.datastructures import OrderedSet from django.utils.functional import cached_property from django.utils.hashable import make_hashable class Lookup(Expression): lookup_name = None prepare_rhs = True can_use_none_as_rhs = False def __init__(self, lhs, rhs): self.lhs, self.rhs = lhs, rhs self.rhs = self.get_prep_lookup() self.lhs = self.get_prep_lhs() if hasattr(self.lhs, "get_bilateral_transforms"): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if bilateral_transforms: # Warn the user as soon as possible if they are trying to apply # a bilateral transformation on a nested QuerySet: that won't work. from django.db.models.sql.query import Query # avoid circular import if isinstance(rhs, Query): raise NotImplementedError( "Bilateral transformations on nested querysets are not implemented." ) self.bilateral_transforms = bilateral_transforms def apply_bilateral_transforms(self, value): for transform in self.bilateral_transforms: value = transform(value) return value def __repr__(self): return f"{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})" def batch_process_rhs(self, compiler, connection, rhs=None): if rhs is None: rhs = self.rhs if self.bilateral_transforms: sqls, sqls_params = [], [] for p in rhs: value = Value(p, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) sql, sql_params = compiler.compile(value) sqls.append(sql) sqls_params.extend(sql_params) else: _, params = self.get_db_prep_lookup(rhs, connection) sqls, sqls_params = ["%s"] * len(params), params return sqls, sqls_params def get_source_expressions(self): if self.rhs_is_direct_value(): return [self.lhs] return [self.lhs, self.rhs] def set_source_expressions(self, new_exprs): if len(new_exprs) == 1: self.lhs = new_exprs[0] else: self.lhs, self.rhs = new_exprs def get_prep_lookup(self): if not self.prepare_rhs or hasattr(self.rhs, "resolve_expression"): return self.rhs if hasattr(self.lhs, "output_field"): if hasattr(self.lhs.output_field, "get_prep_value"): return self.lhs.output_field.get_prep_value(self.rhs) elif self.rhs_is_direct_value(): return Value(self.rhs) return self.rhs def get_prep_lhs(self): if hasattr(self.lhs, "resolve_expression"): return self.lhs return Value(self.lhs) def get_db_prep_lookup(self, value, connection): return ("%s", [value]) def process_lhs(self, compiler, connection, lhs=None): lhs = lhs or self.lhs if hasattr(lhs, "resolve_expression"): lhs = lhs.resolve_expression(compiler.query) sql, params = compiler.compile(lhs) if isinstance(lhs, Lookup): # Wrapped in parentheses to respect operator precedence. 
sql = f"({sql})" return sql, params def process_rhs(self, compiler, connection): value = self.rhs if self.bilateral_transforms: if self.rhs_is_direct_value(): # Do not call get_db_prep_lookup here as the value will be # transformed before being used for lookup value = Value(value, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) if hasattr(value, "as_sql"): sql, params = compiler.compile(value) # Ensure expression is wrapped in parentheses to respect operator # precedence but avoid double wrapping as it can be misinterpreted # on some backends (e.g. subqueries on SQLite). if sql and sql[0] != "(": sql = "(%s)" % sql return sql, params else: return self.get_db_prep_lookup(value, connection) def rhs_is_direct_value(self): return not hasattr(self.rhs, "as_sql") def get_group_by_cols(self): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def as_oracle(self, compiler, connection): # Oracle doesn't allow EXISTS() and filters to be compared to another # expression unless they're wrapped in a CASE WHEN. wrapped = False exprs = [] for expr in (self.lhs, self.rhs): if connection.ops.conditional_expression_supported_in_where_clause(expr): expr = Case(When(expr, then=True), default=False) wrapped = True exprs.append(expr) lookup = type(self)(*exprs) if wrapped else self return lookup.as_sql(compiler, connection) @cached_property def output_field(self): return BooleanField() @property def identity(self): return self.__class__, self.lhs, self.rhs def __eq__(self, other): if not isinstance(other, Lookup): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(make_hashable(self.identity)) def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): c = self.copy() c.is_summary = summarize c.lhs = self.lhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) if hasattr(self.rhs, "resolve_expression"): c.rhs = self.rhs.resolve_expression( query, allow_joins, reuse, summarize, for_save ) return c def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" return sql, params class Transform(RegisterLookupMixin, Func): """ RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field. 
""" bilateral = False arity = 1 @property def lhs(self): return self.get_source_expressions()[0] def get_bilateral_transforms(self): if hasattr(self.lhs, "get_bilateral_transforms"): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if self.bilateral: bilateral_transforms.append(self.__class__) return bilateral_transforms class BuiltinLookup(Lookup): def process_lhs(self, compiler, connection, lhs=None): lhs_sql, params = super().process_lhs(compiler, connection, lhs) field_internal_type = self.lhs.output_field.get_internal_type() db_type = self.lhs.output_field.db_type(connection=connection) lhs_sql = connection.ops.field_cast_sql(db_type, field_internal_type) % lhs_sql lhs_sql = ( connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql ) return lhs_sql, list(params) def as_sql(self, compiler, connection): lhs_sql, params = self.process_lhs(compiler, connection) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) rhs_sql = self.get_rhs_op(connection, rhs_sql) return "%s %s" % (lhs_sql, rhs_sql), params def get_rhs_op(self, connection, rhs): return connection.operators[self.lookup_name] % rhs class FieldGetDbPrepValueMixin: """ Some lookups require Field.get_db_prep_value() to be called on their inputs. """ get_db_prep_lookup_value_is_iterable = False def get_db_prep_lookup(self, value, connection): # For relational fields, use the 'target_field' attribute of the # output_field. field = getattr(self.lhs.output_field, "target_field", None) get_db_prep_value = ( getattr(field, "get_db_prep_value", None) or self.lhs.output_field.get_db_prep_value ) return ( "%s", [get_db_prep_value(v, connection, prepared=True) for v in value] if self.get_db_prep_lookup_value_is_iterable else [get_db_prep_value(value, connection, prepared=True)], ) class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin): """ Some lookups require Field.get_db_prep_value() to be called on each value in an iterable. """ get_db_prep_lookup_value_is_iterable = True def get_prep_lookup(self): if hasattr(self.rhs, "resolve_expression"): return self.rhs prepared_values = [] for rhs_value in self.rhs: if hasattr(rhs_value, "resolve_expression"): # An expression will be handled by the database but can coexist # alongside real values. pass elif self.prepare_rhs and hasattr(self.lhs.output_field, "get_prep_value"): rhs_value = self.lhs.output_field.get_prep_value(rhs_value) prepared_values.append(rhs_value) return prepared_values def process_rhs(self, compiler, connection): if self.rhs_is_direct_value(): # rhs should be an iterable of values. Use batch_process_rhs() # to prepare/transform those values. return self.batch_process_rhs(compiler, connection) else: return super().process_rhs(compiler, connection) def resolve_expression_parameter(self, compiler, connection, sql, param): params = [param] if hasattr(param, "resolve_expression"): param = param.resolve_expression(compiler.query) if hasattr(param, "as_sql"): sql, params = compiler.compile(param) return sql, params def batch_process_rhs(self, compiler, connection, rhs=None): pre_processed = super().batch_process_rhs(compiler, connection, rhs) # The params list may contain expressions which compile to a # sql/param pair. Zip them to get sql and param pairs that refer to the # same argument and attempt to replace them with the result of # compiling the param step. 
sql, params = zip( *( self.resolve_expression_parameter(compiler, connection, sql, param) for sql, param in zip(*pre_processed) ) ) params = itertools.chain.from_iterable(params) return sql, tuple(params) class PostgresOperatorLookup(Lookup): """Lookup defined by operators on PostgreSQL.""" postgres_operator = None def as_postgresql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return "%s %s %s" % (lhs, self.postgres_operator, rhs), params @Field.register_lookup class Exact(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "exact" def get_prep_lookup(self): from django.db.models.sql.query import Query # avoid circular import if isinstance(self.rhs, Query): if self.rhs.has_limit_one(): if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(["pk"]) else: raise ValueError( "The QuerySet value for an exact lookup must be limited to " "one result using slicing." ) return super().get_prep_lookup() def as_sql(self, compiler, connection): # Avoid comparison against direct rhs if lhs is a boolean value. That # turns "boolfield__exact=True" into "WHERE boolean_field" instead of # "WHERE boolean_field = True" when allowed. if ( isinstance(self.rhs, bool) and getattr(self.lhs, "conditional", False) and connection.ops.conditional_expression_supported_in_where_clause( self.lhs ) ): lhs_sql, params = self.process_lhs(compiler, connection) template = "%s" if self.rhs else "NOT %s" return template % lhs_sql, params return super().as_sql(compiler, connection) @Field.register_lookup class IExact(BuiltinLookup): lookup_name = "iexact" prepare_rhs = False def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if params: params[0] = connection.ops.prep_for_iexact_query(params[0]) return rhs, params @Field.register_lookup class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "gt" @Field.register_lookup class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "gte" @Field.register_lookup class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "lt" @Field.register_lookup class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = "lte" class IntegerFieldFloatRounding: """ Allow floats to work as query values for IntegerField. Without this, the decimal portion of the float would always be discarded. """ def get_prep_lookup(self): if isinstance(self.rhs, float): self.rhs = math.ceil(self.rhs) return super().get_prep_lookup() @IntegerField.register_lookup class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual): pass @IntegerField.register_lookup class IntegerLessThan(IntegerFieldFloatRounding, LessThan): pass @Field.register_lookup class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = "in" def get_prep_lookup(self): from django.db.models.sql.query import Query # avoid circular import if isinstance(self.rhs, Query): self.rhs.clear_ordering(clear_default=True) if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(["pk"]) return super().get_prep_lookup() def process_rhs(self, compiler, connection): db_rhs = getattr(self.rhs, "_db", None) if db_rhs is not None and db_rhs != connection.alias: raise ValueError( "Subqueries aren't allowed across different databases. Force " "the inner query to be evaluated using `list(inner_query)`." 
) if self.rhs_is_direct_value(): # Remove None from the list as NULL is never equal to anything. try: rhs = OrderedSet(self.rhs) rhs.discard(None) except TypeError: # Unhashable items in self.rhs rhs = [r for r in self.rhs if r is not None] if not rhs: raise EmptyResultSet # rhs should be an iterable; use batch_process_rhs() to # prepare/transform those values. sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs) placeholder = "(" + ", ".join(sqls) + ")" return (placeholder, sqls_params) return super().process_rhs(compiler, connection) def get_rhs_op(self, connection, rhs): return "IN %s" % rhs def as_sql(self, compiler, connection): max_in_list_size = connection.ops.max_in_list_size() if ( self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size ): return self.split_parameter_list_as_sql(compiler, connection) return super().as_sql(compiler, connection) def split_parameter_list_as_sql(self, compiler, connection): # This is a special case for databases which limit the number of # elements which can appear in an 'IN' clause. max_in_list_size = connection.ops.max_in_list_size() lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.batch_process_rhs(compiler, connection) in_clause_elements = ["("] params = [] for offset in range(0, len(rhs_params), max_in_list_size): if offset > 0: in_clause_elements.append(" OR ") in_clause_elements.append("%s IN (" % lhs) params.extend(lhs_params) sqls = rhs[offset : offset + max_in_list_size] sqls_params = rhs_params[offset : offset + max_in_list_size] param_group = ", ".join(sqls) in_clause_elements.append(param_group) in_clause_elements.append(")") params.extend(sqls_params) in_clause_elements.append(")") return "".join(in_clause_elements), params class PatternLookup(BuiltinLookup): param_pattern = "%%%s%%" prepare_rhs = False def get_rhs_op(self, connection, rhs): # Assume we are in startswith. We need to produce SQL like: # col LIKE %s, ['thevalue%'] # For python values we can (and should) do that directly in Python, # but if the value is for example reference to other column, then # we need to add the % pattern match to the lookup by something like # col LIKE othercol || '%%' # So, for Python values we don't need any special pattern, but for # SQL reference values or SQL transformations we need the correct # pattern added. 
if hasattr(self.rhs, "as_sql") or self.bilateral_transforms: pattern = connection.pattern_ops[self.lookup_name].format( connection.pattern_esc ) return pattern.format(rhs) else: return super().get_rhs_op(connection, rhs) def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if self.rhs_is_direct_value() and params and not self.bilateral_transforms: params[0] = self.param_pattern % connection.ops.prep_for_like_query( params[0] ) return rhs, params @Field.register_lookup class Contains(PatternLookup): lookup_name = "contains" @Field.register_lookup class IContains(Contains): lookup_name = "icontains" @Field.register_lookup class StartsWith(PatternLookup): lookup_name = "startswith" param_pattern = "%s%%" @Field.register_lookup class IStartsWith(StartsWith): lookup_name = "istartswith" @Field.register_lookup class EndsWith(PatternLookup): lookup_name = "endswith" param_pattern = "%%%s" @Field.register_lookup class IEndsWith(EndsWith): lookup_name = "iendswith" @Field.register_lookup class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = "range" def get_rhs_op(self, connection, rhs): return "BETWEEN %s AND %s" % (rhs[0], rhs[1]) @Field.register_lookup class IsNull(BuiltinLookup): lookup_name = "isnull" prepare_rhs = False def as_sql(self, compiler, connection): if not isinstance(self.rhs, bool): raise ValueError( "The QuerySet value for an isnull lookup must be True or False." ) sql, params = self.process_lhs(compiler, connection) if self.rhs: return "%s IS NULL" % sql, params else: return "%s IS NOT NULL" % sql, params @Field.register_lookup class Regex(BuiltinLookup): lookup_name = "regex" prepare_rhs = False def as_sql(self, compiler, connection): if self.lookup_name in connection.operators: return super().as_sql(compiler, connection) else: lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) sql_template = connection.ops.regex_lookup(self.lookup_name) return sql_template % (lhs, rhs), lhs_params + rhs_params @Field.register_lookup class IRegex(Regex): lookup_name = "iregex" class YearLookup(Lookup): def year_lookup_bounds(self, connection, year): from django.db.models.functions import ExtractIsoYear iso_year = isinstance(self.lhs, ExtractIsoYear) output_field = self.lhs.lhs.output_field if isinstance(output_field, DateTimeField): bounds = connection.ops.year_lookup_bounds_for_datetime_field( year, iso_year=iso_year, ) else: bounds = connection.ops.year_lookup_bounds_for_date_field( year, iso_year=iso_year, ) return bounds def as_sql(self, compiler, connection): # Avoid the extract operation if the rhs is a direct value to allow # indexes to be used. if self.rhs_is_direct_value(): # Skip the extract part by directly using the originating field, # that is self.lhs.lhs. 
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, _ = self.process_rhs(compiler, connection) rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql) start, finish = self.year_lookup_bounds(connection, self.rhs) params.extend(self.get_bound_params(start, finish)) return "%s %s" % (lhs_sql, rhs_sql), params return super().as_sql(compiler, connection) def get_direct_rhs_sql(self, connection, rhs): return connection.operators[self.lookup_name] % rhs def get_bound_params(self, start, finish): raise NotImplementedError( "subclasses of YearLookup must provide a get_bound_params() method" ) class YearExact(YearLookup, Exact): def get_direct_rhs_sql(self, connection, rhs): return "BETWEEN %s AND %s" def get_bound_params(self, start, finish): return (start, finish) class YearGt(YearLookup, GreaterThan): def get_bound_params(self, start, finish): return (finish,) class YearGte(YearLookup, GreaterThanOrEqual): def get_bound_params(self, start, finish): return (start,) class YearLt(YearLookup, LessThan): def get_bound_params(self, start, finish): return (start,) class YearLte(YearLookup, LessThanOrEqual): def get_bound_params(self, start, finish): return (finish,) class UUIDTextMixin: """ Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID. """ def process_rhs(self, qn, connection): if not connection.features.has_native_uuid_field: from django.db.models.functions import Replace if self.rhs_is_direct_value(): self.rhs = Value(self.rhs) self.rhs = Replace( self.rhs, Value("-"), Value(""), output_field=CharField() ) rhs, params = super().process_rhs(qn, connection) return rhs, params @UUIDField.register_lookup class UUIDIExact(UUIDTextMixin, IExact): pass @UUIDField.register_lookup class UUIDContains(UUIDTextMixin, Contains): pass @UUIDField.register_lookup class UUIDIContains(UUIDTextMixin, IContains): pass @UUIDField.register_lookup class UUIDStartsWith(UUIDTextMixin, StartsWith): pass @UUIDField.register_lookup class UUIDIStartsWith(UUIDTextMixin, IStartsWith): pass @UUIDField.register_lookup class UUIDEndsWith(UUIDTextMixin, EndsWith): pass @UUIDField.register_lookup class UUIDIEndsWith(UUIDTextMixin, IEndsWith): pass
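
# Illustrative usage sketch (not part of this module): the Lookup machinery
# above is extensible via Field.register_lookup(), following the same pattern
# as the built-in lookups. The "ne" lookup below is a hypothetical example in
# the style of Django's custom-lookup documentation.


@Field.register_lookup
class NotEqual(Lookup):
    lookup_name = "ne"

    def as_sql(self, compiler, connection):
        # Compile both sides, then join them with the SQL inequality operator.
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        params = list(lhs_params) + list(rhs_params)
        return "%s <> %s" % (lhs, rhs), params


# Once registered, every field understands the lookup, e.g.:
#   Author.objects.filter(name__ne="Jack")
# compiles to roughly: ... WHERE "author"."name" <> 'Jack'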
castiel248/Convert
Lib/site-packages/django/db/models/lookups.py
Python
mit
24,608
import copy import inspect from functools import wraps from importlib import import_module from django.db import router from django.db.models.query import QuerySet class BaseManager: # To retain order, track each time a Manager instance is created. creation_counter = 0 # Set to True for the 'objects' managers that are automatically created. auto_created = False #: If set to True the manager will be serialized into migrations and will #: thus be available in e.g. RunPython operations. use_in_migrations = False def __new__(cls, *args, **kwargs): # Capture the arguments to make returning them trivial. obj = super().__new__(cls) obj._constructor_args = (args, kwargs) return obj def __init__(self): super().__init__() self._set_creation_counter() self.model = None self.name = None self._db = None self._hints = {} def __str__(self): """Return "app_label.model_label.manager_name".""" return "%s.%s" % (self.model._meta.label, self.name) def __class_getitem__(cls, *args, **kwargs): return cls def deconstruct(self): """ Return a 5-tuple of the form (as_manager (True), manager_class, queryset_class, args, kwargs). Raise a ValueError if the manager is dynamically generated. """ qs_class = self._queryset_class if getattr(self, "_built_with_as_manager", False): # using MyQuerySet.as_manager() return ( True, # as_manager None, # manager_class "%s.%s" % (qs_class.__module__, qs_class.__name__), # qs_class None, # args None, # kwargs ) else: module_name = self.__module__ name = self.__class__.__name__ # Make sure it's actually there and not an inner class module = import_module(module_name) if not hasattr(module, name): raise ValueError( "Could not find manager %s in %s.\n" "Please note that you need to inherit from managers you " "dynamically generated with 'from_queryset()'." % (name, module_name) ) return ( False, # as_manager "%s.%s" % (module_name, name), # manager_class None, # qs_class self._constructor_args[0], # args self._constructor_args[1], # kwargs ) def check(self, **kwargs): return [] @classmethod def _get_queryset_methods(cls, queryset_class): def create_method(name, method): @wraps(method) def manager_method(self, *args, **kwargs): return getattr(self.get_queryset(), name)(*args, **kwargs) return manager_method new_methods = {} for name, method in inspect.getmembers( queryset_class, predicate=inspect.isfunction ): # Only copy missing methods. if hasattr(cls, name): continue # Only copy public methods or methods with the attribute # queryset_only=False. queryset_only = getattr(method, "queryset_only", None) if queryset_only or (queryset_only is None and name.startswith("_")): continue # Copy the method onto the manager. new_methods[name] = create_method(name, method) return new_methods @classmethod def from_queryset(cls, queryset_class, class_name=None): if class_name is None: class_name = "%sFrom%s" % (cls.__name__, queryset_class.__name__) return type( class_name, (cls,), { "_queryset_class": queryset_class, **cls._get_queryset_methods(queryset_class), }, ) def contribute_to_class(self, cls, name): self.name = self.name or name self.model = cls setattr(cls, name, ManagerDescriptor(self)) cls._meta.add_manager(self) def _set_creation_counter(self): """ Set the creation counter value for this instance and increment the class-level copy. 
""" self.creation_counter = BaseManager.creation_counter BaseManager.creation_counter += 1 def db_manager(self, using=None, hints=None): obj = copy.copy(self) obj._db = using or self._db obj._hints = hints or self._hints return obj @property def db(self): return self._db or router.db_for_read(self.model, **self._hints) ####################### # PROXIES TO QUERYSET # ####################### def get_queryset(self): """ Return a new QuerySet object. Subclasses can override this method to customize the behavior of the Manager. """ return self._queryset_class(model=self.model, using=self._db, hints=self._hints) def all(self): # We can't proxy this method through the `QuerySet` like we do for the # rest of the `QuerySet` methods. This is because `QuerySet.all()` # works by creating a "copy" of the current queryset and in making said # copy, all the cached `prefetch_related` lookups are lost. See the # implementation of `RelatedManager.get_queryset()` for a better # understanding of how this comes into play. return self.get_queryset() def __eq__(self, other): return ( isinstance(other, self.__class__) and self._constructor_args == other._constructor_args ) def __hash__(self): return id(self) class Manager(BaseManager.from_queryset(QuerySet)): pass class ManagerDescriptor: def __init__(self, manager): self.manager = manager def __get__(self, instance, cls=None): if instance is not None: raise AttributeError( "Manager isn't accessible via %s instances" % cls.__name__ ) if cls._meta.abstract: raise AttributeError( "Manager isn't available; %s is abstract" % (cls._meta.object_name,) ) if cls._meta.swapped: raise AttributeError( "Manager isn't available; '%s' has been swapped for '%s'" % ( cls._meta.label, cls._meta.swapped, ) ) return cls._meta.managers_map[self.manager.name] class EmptyManager(Manager): def __init__(self, model): super().__init__() self.model = model def get_queryset(self): return super().get_queryset().none()
castiel248/Convert
Lib/site-packages/django/db/models/manager.py
Python
mit
6,866
import bisect import copy import inspect import warnings from collections import defaultdict from django.apps import apps from django.conf import settings from django.core.exceptions import FieldDoesNotExist, ImproperlyConfigured from django.db import connections from django.db.models import AutoField, Manager, OrderWrt, UniqueConstraint from django.db.models.query_utils import PathInfo from django.utils.datastructures import ImmutableList, OrderedSet from django.utils.deprecation import RemovedInDjango51Warning from django.utils.functional import cached_property from django.utils.module_loading import import_string from django.utils.text import camel_case_to_spaces, format_lazy from django.utils.translation import override PROXY_PARENTS = object() EMPTY_RELATION_TREE = () IMMUTABLE_WARNING = ( "The return type of '%s' should never be mutated. If you want to manipulate this " "list for your own use, make a copy first." ) DEFAULT_NAMES = ( "verbose_name", "verbose_name_plural", "db_table", "db_table_comment", "ordering", "unique_together", "permissions", "get_latest_by", "order_with_respect_to", "app_label", "db_tablespace", "abstract", "managed", "proxy", "swappable", "auto_created", # Must be kept for backward compatibility with old migrations. "index_together", "apps", "default_permissions", "select_on_save", "default_related_name", "required_db_features", "required_db_vendor", "base_manager_name", "default_manager_name", "indexes", "constraints", ) def normalize_together(option_together): """ option_together can be either a tuple of tuples, or a single tuple of two strings. Normalize it to a tuple of tuples, so that calling code can uniformly expect that. """ try: if not option_together: return () if not isinstance(option_together, (tuple, list)): raise TypeError first_element = option_together[0] if not isinstance(first_element, (tuple, list)): option_together = (option_together,) # Normalize everything to tuples return tuple(tuple(ot) for ot in option_together) except TypeError: # If the value of option_together isn't valid, return it # verbatim; this will be picked up by the check framework later. return option_together def make_immutable_fields_list(name, data): return ImmutableList(data, warning=IMMUTABLE_WARNING % name) class Options: FORWARD_PROPERTIES = { "fields", "many_to_many", "concrete_fields", "local_concrete_fields", "_non_pk_concrete_field_names", "_forward_fields_map", "managers", "managers_map", "base_manager", "default_manager", } REVERSE_PROPERTIES = {"related_objects", "fields_map", "_relation_tree"} default_apps = apps def __init__(self, meta, app_label=None): self._get_fields_cache = {} self.local_fields = [] self.local_many_to_many = [] self.private_fields = [] self.local_managers = [] self.base_manager_name = None self.default_manager_name = None self.model_name = None self.verbose_name = None self.verbose_name_plural = None self.db_table = "" self.db_table_comment = "" self.ordering = [] self._ordering_clash = False self.indexes = [] self.constraints = [] self.unique_together = [] self.index_together = [] # RemovedInDjango51Warning. 
self.select_on_save = False self.default_permissions = ("add", "change", "delete", "view") self.permissions = [] self.object_name = None self.app_label = app_label self.get_latest_by = None self.order_with_respect_to = None self.db_tablespace = settings.DEFAULT_TABLESPACE self.required_db_features = [] self.required_db_vendor = None self.meta = meta self.pk = None self.auto_field = None self.abstract = False self.managed = True self.proxy = False # For any class that is a proxy (including automatically created # classes for deferred object loading), proxy_for_model tells us # which class this model is proxying. Note that proxy_for_model # can create a chain of proxy models. For non-proxy models, the # variable is always None. self.proxy_for_model = None # For any non-abstract class, the concrete class is the model # in the end of the proxy_for_model chain. In particular, for # concrete models, the concrete_model is always the class itself. self.concrete_model = None self.swappable = None self.parents = {} self.auto_created = False # List of all lookups defined in ForeignKey 'limit_choices_to' options # from *other* models. Needed for some admin checks. Internal use only. self.related_fkey_lookups = [] # A custom app registry to use, if you're making a separate model set. self.apps = self.default_apps self.default_related_name = None @property def label(self): return "%s.%s" % (self.app_label, self.object_name) @property def label_lower(self): return "%s.%s" % (self.app_label, self.model_name) @property def app_config(self): # Don't go through get_app_config to avoid triggering imports. return self.apps.app_configs.get(self.app_label) def contribute_to_class(self, cls, name): from django.db import connection from django.db.backends.utils import truncate_name cls._meta = self self.model = cls # First, construct the default values for these options. self.object_name = cls.__name__ self.model_name = self.object_name.lower() self.verbose_name = camel_case_to_spaces(self.object_name) # Store the original user-defined values for each option, # for use when serializing the model definition self.original_attrs = {} # Next, apply any overridden values from 'class Meta'. if self.meta: meta_attrs = self.meta.__dict__.copy() for name in self.meta.__dict__: # Ignore any private attributes that Django doesn't care about. # NOTE: We can't modify a dictionary's contents while looping # over it, so we loop over the *original* dictionary instead. if name.startswith("_"): del meta_attrs[name] for attr_name in DEFAULT_NAMES: if attr_name in meta_attrs: setattr(self, attr_name, meta_attrs.pop(attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) elif hasattr(self.meta, attr_name): setattr(self, attr_name, getattr(self.meta, attr_name)) self.original_attrs[attr_name] = getattr(self, attr_name) self.unique_together = normalize_together(self.unique_together) self.index_together = normalize_together(self.index_together) if self.index_together: warnings.warn( f"'index_together' is deprecated. Use 'Meta.indexes' in " f"{self.label!r} instead.", RemovedInDjango51Warning, ) # App label/class name interpolation for names of constraints and # indexes. if not getattr(cls._meta, "abstract", False): for attr_name in {"constraints", "indexes"}: objs = getattr(self, attr_name, []) setattr(self, attr_name, self._format_names_with_class(cls, objs)) # verbose_name_plural is a special case because it uses a 's' # by default. 
            if self.verbose_name_plural is None:
                self.verbose_name_plural = format_lazy("{}s", self.verbose_name)

            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)

            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError(
                    "'class Meta' got invalid attribute(s): %s" % ",".join(meta_attrs)
                )
        else:
            self.verbose_name_plural = format_lazy("{}s", self.verbose_name)
        del self.meta

        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(
                self.db_table, connection.ops.max_name_length()
            )

    def _format_names_with_class(self, cls, objs):
        """App label/class name interpolation for object names."""
        new_objs = []
        for obj in objs:
            obj = obj.clone()
            obj.name = obj.name % {
                "app_label": cls._meta.app_label.lower(),
                "class": cls.__name__.lower(),
            }
            new_objs.append(obj)
        return new_objs

    def _get_default_pk_class(self):
        pk_class_path = getattr(
            self.app_config,
            "default_auto_field",
            settings.DEFAULT_AUTO_FIELD,
        )
        if self.app_config and self.app_config._is_default_auto_field_overridden:
            app_config_class = type(self.app_config)
            source = (
                f"{app_config_class.__module__}."
                f"{app_config_class.__qualname__}.default_auto_field"
            )
        else:
            source = "DEFAULT_AUTO_FIELD"
        if not pk_class_path:
            raise ImproperlyConfigured(f"{source} must not be empty.")
        try:
            pk_class = import_string(pk_class_path)
        except ImportError as e:
            msg = (
                f"{source} refers to the module '{pk_class_path}' that could "
                f"not be imported."
            )
            raise ImproperlyConfigured(msg) from e
        if not issubclass(pk_class, AutoField):
            raise ValueError(
                f"Primary key '{pk_class_path}' referred by {source} must "
                f"subclass AutoField."
            )
        return pk_class

    def _prepare(self, model):
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f
                    for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist(
                    "%s has no field named '%s'" % (self.object_name, query)
                )
            self.ordering = ("_order",)
            if not any(
                isinstance(field, OrderWrt) for field in model._meta.local_fields
            ):
                model.add_to_class("_order", OrderWrt())
        else:
            self.order_with_respect_to = None

        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(iter(self.parents.values()))
                # Look for a local field with the same name as the first
                # parent link. If a local field has already been created,
                # use it instead of promoting the parent.
                already_created = [
                    fld for fld in self.local_fields if fld.name == field.name
                ]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                pk_class = self._get_default_pk_class()
                auto = pk_class(verbose_name="ID", primary_key=True, auto_created=True)
                model.add_to_class("id", auto)

    def add_manager(self, manager):
        self.local_managers.append(manager)
        self._expire_cache()

    def add_field(self, field, private=False):
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
if private: self.private_fields.append(field) elif field.is_relation and field.many_to_many: bisect.insort(self.local_many_to_many, field) else: bisect.insort(self.local_fields, field) self.setup_pk(field) # If the field being added is a relation to another known field, # expire the cache on this field and the forward cache on the field # being referenced, because there will be new relationships in the # cache. Otherwise, expire the cache of references *to* this field. # The mechanism for getting at the related model is slightly odd - # ideally, we'd just ask for field.related_model. However, related_model # is a cached property, and all the models haven't been loaded yet, so # we need to make sure we don't cache a string reference. if ( field.is_relation and hasattr(field.remote_field, "model") and field.remote_field.model ): try: field.remote_field.model._meta._expire_cache(forward=False) except AttributeError: pass self._expire_cache() else: self._expire_cache(reverse=False) def setup_pk(self, field): if not self.pk and field.primary_key: self.pk = field field.serialize = False def setup_proxy(self, target): """ Do the internal setup so that the current model is a proxy for "target". """ self.pk = target._meta.pk self.proxy_for_model = target self.db_table = target._meta.db_table def __repr__(self): return "<Options for %s>" % self.object_name def __str__(self): return self.label_lower def can_migrate(self, connection): """ Return True if the model can/should be migrated on the `connection`. `connection` can be either a real connection or a connection alias. """ if self.proxy or self.swapped or not self.managed: return False if isinstance(connection, str): connection = connections[connection] if self.required_db_vendor: return self.required_db_vendor == connection.vendor if self.required_db_features: return all( getattr(connection.features, feat, False) for feat in self.required_db_features ) return True @property def verbose_name_raw(self): """Return the untranslated verbose name.""" with override(None): return str(self.verbose_name) @property def swapped(self): """ Has this model been swapped out for another? If so, return the model name of the replacement; otherwise, return None. For historical reasons, model name lookups using get_model() are case insensitive, so we make sure we are case insensitive here. """ if self.swappable: swapped_for = getattr(settings, self.swappable, None) if swapped_for: try: swapped_label, swapped_object = swapped_for.split(".") except ValueError: # setting not in the format app_label.model_name # raising ImproperlyConfigured here causes problems with # test cleanup code - instead it is raised in get_user_model # or as part of validation. 
return swapped_for if ( "%s.%s" % (swapped_label, swapped_object.lower()) != self.label_lower ): return swapped_for return None @cached_property def managers(self): managers = [] seen_managers = set() bases = (b for b in self.model.mro() if hasattr(b, "_meta")) for depth, base in enumerate(bases): for manager in base._meta.local_managers: if manager.name in seen_managers: continue manager = copy.copy(manager) manager.model = self.model seen_managers.add(manager.name) managers.append((depth, manager.creation_counter, manager)) return make_immutable_fields_list( "managers", (m[2] for m in sorted(managers)), ) @cached_property def managers_map(self): return {manager.name: manager for manager in self.managers} @cached_property def base_manager(self): base_manager_name = self.base_manager_name if not base_manager_name: # Get the first parent's base_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, "_meta"): if parent._base_manager.name != "_base_manager": base_manager_name = parent._base_manager.name break if base_manager_name: try: return self.managers_map[base_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, base_manager_name, ) ) manager = Manager() manager.name = "_base_manager" manager.model = self.model manager.auto_created = True return manager @cached_property def default_manager(self): default_manager_name = self.default_manager_name if not default_manager_name and not self.local_managers: # Get the first parent's default_manager_name if there's one. for parent in self.model.mro()[1:]: if hasattr(parent, "_meta"): default_manager_name = parent._meta.default_manager_name break if default_manager_name: try: return self.managers_map[default_manager_name] except KeyError: raise ValueError( "%s has no manager named %r" % ( self.object_name, default_manager_name, ) ) if self.managers: return self.managers[0] @cached_property def fields(self): """ Return a list of all forward fields on the model and its parents, excluding ManyToManyFields. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ # For legacy reasons, the fields property should only contain forward # fields that are not private or with a m2m cardinality. Therefore we # pass these three filters as filters to the generator. # The third lambda is a longwinded way of checking f.related_model - we don't # use that property directly because related_model is a cached property, # and all the models may not have been loaded yet; we don't want to cache # the string reference to the related_model. def is_not_an_m2m_field(f): return not (f.is_relation and f.many_to_many) def is_not_a_generic_relation(f): return not (f.is_relation and f.one_to_many) def is_not_a_generic_foreign_key(f): return not ( f.is_relation and f.many_to_one and not (hasattr(f.remote_field, "model") and f.remote_field.model) ) return make_immutable_fields_list( "fields", ( f for f in self._get_fields(reverse=False) if is_not_an_m2m_field(f) and is_not_a_generic_relation(f) and is_not_a_generic_foreign_key(f) ), ) @cached_property def concrete_fields(self): """ Return a list of all concrete fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. 
""" return make_immutable_fields_list( "concrete_fields", (f for f in self.fields if f.concrete) ) @cached_property def local_concrete_fields(self): """ Return a list of all concrete fields on the model. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ return make_immutable_fields_list( "local_concrete_fields", (f for f in self.local_fields if f.concrete) ) @cached_property def many_to_many(self): """ Return a list of all many to many fields on the model and its parents. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this list. """ return make_immutable_fields_list( "many_to_many", ( f for f in self._get_fields(reverse=False) if f.is_relation and f.many_to_many ), ) @cached_property def related_objects(self): """ Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list. """ all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), ) @cached_property def _forward_fields_map(self): res = {} fields = self._get_fields(reverse=False) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res @cached_property def fields_map(self): res = {} fields = self._get_fields(forward=False, include_hidden=True) for field in fields: res[field.name] = field # Due to the way Django's internals work, get_field() should also # be able to fetch a field by attname. In the case of a concrete # field with relation, includes the *_id name too try: res[field.attname] = field except AttributeError: pass return res def get_field(self, field_name): """ Return a field instance given the name of a forward or reverse field. """ try: # In order to avoid premature loading of the relation tree # (expensive) we prefer checking if the field is a forward field. return self._forward_fields_map[field_name] except KeyError: # If the app registry is not ready, reverse fields are # unavailable, therefore we throw a FieldDoesNotExist exception. if not self.apps.models_ready: raise FieldDoesNotExist( "%s has no field named '%s'. The app cache isn't ready yet, " "so if this is an auto-created related field, it won't " "be available yet." % (self.object_name, field_name) ) try: # Retrieve field instance by name from cached or just-computed # field map. return self.fields_map[field_name] except KeyError: raise FieldDoesNotExist( "%s has no field named '%s'" % (self.object_name, field_name) ) def get_base_chain(self, model): """ Return a list of parent classes leading to `model` (ordered from closest to most distant ancestor). This has to handle the case where `model` is a grandparent or even more distant relation. 
""" if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return [] def get_parent_list(self): """ Return all the ancestors of this model as a list ordered by MRO. Useful for determining if something is an ancestor, regardless of lineage. """ result = OrderedSet(self.parents) for parent in self.parents: for ancestor in parent._meta.get_parent_list(): result.add(ancestor) return list(result) def get_ancestor_link(self, ancestor): """ Return the field on the current model which points to the given "ancestor". This is possible an indirect link (a pointer to a parent model, which points, eventually, to the ancestor). Used when constructing table joins for model inheritance. Return None if the model isn't an ancestor of this one. """ if ancestor in self.parents: return self.parents[ancestor] for parent in self.parents: # Tries to get a link field from the immediate parent parent_link = parent._meta.get_ancestor_link(ancestor) if parent_link: # In case of a proxied model, the first link # of the chain to the ancestor is that parent # links return self.parents[parent] or parent_link def get_path_to_parent(self, parent): """ Return a list of PathInfos containing the path from the current model to the parent model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] # Skip the chain of proxy to the concrete proxied model. proxied_model = self.concrete_model path = [] opts = self for int_model in self.get_base_chain(parent): if int_model is proxied_model: opts = int_model._meta else: final_field = opts.parents[int_model] targets = (final_field.remote_field.get_related_field(),) opts = int_model._meta path.append( PathInfo( from_opts=final_field.model._meta, to_opts=opts, target_fields=targets, join_field=final_field, m2m=False, direct=True, filtered_relation=None, ) ) return path def get_path_from_parent(self, parent): """ Return a list of PathInfos containing the path from the parent model to the current model, or an empty list if parent is not a parent of the current model. """ if self.model is parent: return [] model = self.concrete_model # Get a reversed base chain including both the current and parent # models. chain = model._meta.get_base_chain(parent) chain.reverse() chain.append(model) # Construct a list of the PathInfos between models in chain. path = [] for i, ancestor in enumerate(chain[:-1]): child = chain[i + 1] link = child._meta.get_ancestor_link(ancestor) path.extend(link.reverse_path_infos) return path def _populate_directed_relation_graph(self): """ This method is used by each model to find its reverse objects. As this method is very expensive and is accessed frequently (it looks up every field in a model, in every app), it is computed on first access and then is set as a property on every model. """ related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. 
if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): remote_label = f.remote_field.model._meta.concrete_model._meta.label related_objects_graph[remote_label].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[ model._meta.concrete_model._meta.label ] model._meta.__dict__["_relation_tree"] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get("_relation_tree", EMPTY_RELATION_TREE) @cached_property def _relation_tree(self): return self._populate_directed_relation_graph() def _expire_cache(self, forward=True, reverse=True): # This method is usually called by apps.cache_clear(), when the # registry is finalized, or when a new field is added. if forward: for cache_key in self.FORWARD_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) if reverse and not self.abstract: for cache_key in self.REVERSE_PROPERTIES: if cache_key in self.__dict__: delattr(self, cache_key) self._get_fields_cache = {} def get_fields(self, include_parents=True, include_hidden=False): """ Return a list of fields associated to the model. By default, include forward and reverse fields, fields derived from inheritance, but not hidden fields. The returned fields can be changed using the parameters: - include_parents: include fields derived from inheritance - include_hidden: include fields that have a related_name that starts with a "+" """ if include_parents is False: include_parents = PROXY_PARENTS return self._get_fields( include_parents=include_parents, include_hidden=include_hidden ) def _get_fields( self, forward=True, reverse=True, include_parents=True, include_hidden=False, seen_models=None, ): """ Internal helper function to return fields of the model. * If forward=True, then fields defined on this model are returned. * If reverse=True, then relations pointing to this model are returned. * If include_hidden=True, then fields with is_hidden=True are returned. * The include_parents argument toggles if fields from parent models should be included. It has three values: True, False, and PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all fields defined for the current model or any of its parents in the parent chain to the model's concrete model. """ if include_parents not in (True, False, PROXY_PARENTS): raise TypeError( "Invalid argument for include_parents: %s" % (include_parents,) ) # This helper function is used to allow recursion in ``get_fields()`` # implementation and to provide a fast way for Django's internals to # access specific subsets of fields. # We must keep track of which models we have already seen. Otherwise we # could include the same field multiple times from different models. topmost_call = seen_models is None if topmost_call: seen_models = set() seen_models.add(self.model) # Creates a cache key composed of all arguments cache_key = (forward, reverse, include_parents, include_hidden, topmost_call) try: # In order to avoid list manipulation. 
Always return a shallow copy # of the results. return self._get_fields_cache[cache_key] except KeyError: pass fields = [] # Recursively call _get_fields() on each parent, with the same # options provided in this call. if include_parents is not False: for parent in self.parents: # In diamond inheritance it is possible that we see the same # model from two different routes. In that case, avoid adding # fields from the same parent again. if parent in seen_models: continue if ( parent._meta.concrete_model != self.concrete_model and include_parents == PROXY_PARENTS ): continue for obj in parent._meta._get_fields( forward=forward, reverse=reverse, include_parents=include_parents, include_hidden=include_hidden, seen_models=seen_models, ): if ( not getattr(obj, "parent_link", False) or obj.model == self.concrete_model ): fields.append(obj) if reverse and not self.proxy: # Tree is computed once and cached until the app cache is expired. # It is composed of a list of fields pointing to the current model # from other models. all_fields = self._relation_tree for field in all_fields: # If hidden fields should be included or the relation is not # intentionally hidden, add to the fields dict. if include_hidden or not field.remote_field.hidden: fields.append(field.remote_field) if forward: fields += self.local_fields fields += self.local_many_to_many # Private fields are recopied to each child model, and they get a # different model as field.model in each child. Hence we have to # add the private fields separately from the topmost call. If we # did this recursively similar to local_fields, we would get field # instances with field.model != self.model. if topmost_call: fields += self.private_fields # In order to avoid list manipulation. Always # return a shallow copy of the results fields = make_immutable_fields_list("get_fields()", fields) # Store result into cache for later access self._get_fields_cache[cache_key] = fields return fields @cached_property def total_unique_constraints(self): """ Return a list of total unique constraints. Useful for determining set of fields guaranteed to be unique for all rows. """ return [ constraint for constraint in self.constraints if ( isinstance(constraint, UniqueConstraint) and constraint.condition is None and not constraint.contains_expressions ) ] @cached_property def _property_names(self): """Return a set of the names of the properties defined on the model.""" names = [] for name in dir(self.model): attr = inspect.getattr_static(self.model, name) if isinstance(attr, property): names.append(name) return frozenset(names) @cached_property def _non_pk_concrete_field_names(self): """ Return a set of the non-pk concrete field names defined on the model. """ names = [] for field in self.concrete_fields: if not field.primary_key: names.append(field.name) if field.name != field.attname: names.append(field.attname) return frozenset(names) @cached_property def db_returning_fields(self): """ Private API intended only to be used by Django itself. Fields to be returned after a database insert. """ return [ field for field in self._get_fields( forward=True, reverse=False, include_parents=PROXY_PARENTS ) if getattr(field, "db_returning", False) ]
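
# Illustrative usage sketch (not part of this module): the Options machinery
# above is reached through the public Model._meta API. This snippet assumes a
# configured project with django.contrib.auth installed; any concrete model
# would do.

from django.contrib.auth.models import User

opts = User._meta
username = opts.get_field("username")  # served from _forward_fields_map
all_fields = opts.get_fields()  # forward + reverse fields, parents included
print(opts.label, username.column)  # -> auth.User username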
castiel248/Convert
Lib/site-packages/django/db/models/options.py
Python
mit
38,977
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from itertools import chain, islice from asgiref.sync import sync_to_async import django from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, Field, sql from django.db.models.constants import LOOKUP_SEP, OnConflict from django.db.models.deletion import Collector from django.db.models.expressions import Case, F, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import ( AltersData, create_namedtuple_class, resolve_callables, ) from django.utils import timezone from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property, partition # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__( self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size async def _async_generator(self): # Generators don't actually start running until the first time you call # next() on them, so make the generator object in the async thread and # then repeatedly dispatch to it in a sync thread. sync_generator = self.__iter__() def next_slice(gen): return list(islice(gen, self.chunk_size)) while True: chunk = await sync_to_async(next_slice)(sync_generator) for item in chunk: yield item if len(chunk) < self.chunk_size: break # __aiter__() is a *synchronous* method that has to then return an # *asynchronous* iterator/generator. Thus, nest an async generator inside # it. # This is a generic iterable converter for now, and is going to suffer a # performance penalty on large sets of items due to the cost of crossing # over the sync barrier for each chunk. Custom __aiter__() methods should # be added to each Iterable subclass, but that needs some work in the # Compiler first. def __aiter__(self): return self._async_generator() class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. 
results = compiler.execute_sql( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ) select, klass_info, annotation_col_map = ( compiler.select, compiler.klass_info, compiler.annotation_col_map, ) model_cls = klass_info["model"] select_fields = klass_info["select_fields"] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [ f[0].target.attname for f in select[model_fields_start:model_fields_end] ] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ ( field, related_objs, operator.attrgetter( *[ field.attname if from_field == "self" else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ] ), ) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db( db, init_list, row[model_fields_start:model_fields_end] ) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class RawModelIterable(BaseIterable): """ Iterable that yields a model instance for each row from a raw queryset. """ def __iter__(self): # Cache some things for performance reasons outside the loop. db = self.queryset.db query = self.queryset.query connection = connections[db] compiler = connection.ops.compiler("SQLCompiler")(query, connection, db) query_iterator = iter(query) try: ( model_init_names, model_init_pos, annotation_fields, ) = self.queryset.resolve_model_init_order() model_cls = self.queryset.model if model_cls._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( "Raw query must include the primary key" ) fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns] converters = compiler.get_converters( [f.get_col(f.model._meta.db_table) if f else None for f in fields] ) if converters: query_iterator = compiler.apply_converters(query_iterator, converters) for values in query_iterator: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. if hasattr(query, "cursor") and query.cursor: query.cursor.close() class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. 
""" def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [ *queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields), ] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ), ) return compiler.results_iter( tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size, ) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] tuple_class = create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter( chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size ): yield row[0] class QuerySet(AltersData): """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, args, kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): if value.values_select: self._iterable_class = ValuesIterable self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """Don't populate the QuerySet's cache.""" obj = self.__class__() for k, v in self.__dict__.items(): if k == "_result_cache": obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): # Force the cache to be fully populated. self._fetch_all() return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled queryset instance's Django version %s does not " "match the current version %s." 
% (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled queryset instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def __repr__(self): data = list(self[: REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return "<%s %r>" % (self.__class__.__name__, data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler.execute_sql() - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql.compiler.results_iter() - Returns one row at a time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects. """ self._fetch_all() return iter(self._result_cache) def __aiter__(self): # Remember, __aiter__ itself is synchronous; it's the thing it returns # that is async! async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def __bool__(self): self._fetch_all() return bool(self._result_cache) def __getitem__(self, k): """Retrieve an item or slice from the set of results.""" if not isinstance(k, (int, slice)): raise TypeError( "QuerySet indices must be integers or slices, not %s." % type(k).__name__ ) if (isinstance(k, int) and k < 0) or ( isinstance(k, slice) and ( (k.start is not None and k.start < 0) or (k.stop is not None and k.stop < 0) ) ): raise ValueError("Negative indexing is not supported.") if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[:: k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __class_getitem__(cls, *args, **kwargs): return cls def __and__(self, other): self._check_operator_queryset(other, "&") self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._check_operator_queryset(other, "|") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.OR) return combined def __xor__(self, other): self._check_operator_queryset(other, "^") self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = ( self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values("pk")) ) combined = query._chain() combined._merge_known_related_objects(other) if not
other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values("pk")) combined.query.combine(other.query, sql.XOR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): iterable = self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size or 2000, ) if not self._prefetch_related_lookups or chunk_size is None: yield from iterable return iterator = iter(iterable) while results := list(islice(iterator, chunk_size)): prefetch_related_objects(results, *self._prefetch_related_lookups) yield from results def iterator(self, chunk_size=None): """ An iterator over the results from applying this QuerySet to the database. chunk_size must be provided for QuerySets that prefetch related objects. Otherwise, a default chunk_size of 2000 is supplied. """ if chunk_size is None: if self._prefetch_related_lookups: # When the deprecation ends, replace with: # raise ValueError( # 'chunk_size must be provided when using ' # 'QuerySet.iterator() after prefetch_related().' # ) warnings.warn( "Using QuerySet.iterator() after prefetch_related() " "without specifying chunk_size is deprecated.", category=RemovedInDjango50Warning, stacklevel=2, ) elif chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) return self._iterator(use_chunked_fetch, chunk_size) async def aiterator(self, chunk_size=2000): """ An asynchronous iterator over the results from applying this QuerySet to the database. """ if self._prefetch_related_lookups: raise NotSupportedError( "Using QuerySet.aiterator() after prefetch_related() is not supported." ) if chunk_size <= 0: raise ValueError("Chunk size must be strictly positive.") use_chunked_fetch = not connections[self.db].settings_dict.get( "DISABLE_SERVER_SIDE_CURSORS" ) async for item in self._iterable_class( self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size ): yield item def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions( (*args, *kwargs.values()), method_name="aggregate" ) for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg return self.query.chain().get_aggregation(self.db, kwargs) async def aaggregate(self, *args, **kwargs): return await sync_to_async(self.aggregate)(*args, **kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) async def acount(self): return await sync_to_async(self.count)() def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. 
""" if self.query.combinator and (args or kwargs): raise NotSupportedError( "Calling QuerySet.get(...) with filters after %s() is not " "supported." % self.query.combinator ) clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if ( not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit ): limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( "get() returned more than one %s -- it returned %s!" % ( self.model._meta.object_name, num if not limit or num < limit else "more than %s" % (limit - 1), ) ) async def aget(self, *args, **kwargs): return await sync_to_async(self.get)(*args, **kwargs) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj async def acreate(self, **kwargs): return await sync_to_async(self.create)(**kwargs) def _prepare_for_bulk_create(self, objs): for obj in objs: if obj.pk is None: # Populate new PK values. obj.pk = obj._meta.pk.get_pk_value_on_save(obj) obj._prepare_related_fields_for_save(operation_name="bulk_create") def _check_bulk_create_options( self, ignore_conflicts, update_conflicts, update_fields, unique_fields ): if ignore_conflicts and update_conflicts: raise ValueError( "ignore_conflicts and update_conflicts are mutually exclusive." ) db_features = connections[self.db].features if ignore_conflicts: if not db_features.supports_ignore_conflicts: raise NotSupportedError( "This database backend does not support ignoring conflicts." ) return OnConflict.IGNORE elif update_conflicts: if not db_features.supports_update_conflicts: raise NotSupportedError( "This database backend does not support updating conflicts." ) if not update_fields: raise ValueError( "Fields that will be updated when a row insertion fails " "on conflicts must be provided." ) if unique_fields and not db_features.supports_update_conflicts_with_target: raise NotSupportedError( "This database backend does not support updating " "conflicts with specifying unique fields that can trigger " "the upsert." ) if not unique_fields and db_features.supports_update_conflicts_with_target: raise ValueError( "Unique fields that can trigger the upsert must be provided." ) # Updating primary keys and non-concrete fields is forbidden. if any(not f.concrete or f.many_to_many for f in update_fields): raise ValueError( "bulk_create() can only be used with concrete fields in " "update_fields." ) if any(f.primary_key for f in update_fields): raise ValueError( "bulk_create() cannot be used with primary keys in " "update_fields." ) if unique_fields: if any(not f.concrete or f.many_to_many for f in unique_fields): raise ValueError( "bulk_create() can only be used with concrete fields " "in unique_fields." ) return OnConflict.UPDATE return None def bulk_create( self, objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, update_fields=None, unique_fields=None, ): """ Insert each of the instances into the database. 
Do *not* call save() on each of the instances, do not send any pre/post_save signals, and do not set the primary key attribute if it is an autoincrement field (except if features.can_return_rows_from_bulk_insert=True). Multi-table models are not supported. """ # When you bulk insert you don't get the primary keys back (if it's an # autoincrement, except if can_return_rows_from_bulk_insert=True), so # you can't insert into the child tables that reference this. There # are two workarounds: # 1) This could be implemented if you didn't have an autoincrement pk # 2) You could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back and then doing a single bulk # insert into the childmost table. # We currently set the primary keys on the objects when using # PostgreSQL via the RETURNING ID clause. It should be possible for # Oracle as well, but the semantics for extracting the primary keys are # trickier, so it's not done yet. if batch_size is not None and batch_size <= 0: raise ValueError("Batch size must be a positive integer.") # Check that the parents share the same concrete model with our # model to detect the inheritance pattern ConcreteGrandParent -> # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy # would not identify that case as involving multiple tables. for parent in self.model._meta.get_parent_list(): if parent._meta.concrete_model is not self.model._meta.concrete_model: raise ValueError("Can't bulk create a multi-table inherited model") if not objs: return objs opts = self.model._meta if unique_fields: # Primary key is allowed in unique_fields. unique_fields = [ self.model._meta.get_field(opts.pk.name if name == "pk" else name) for name in unique_fields ] if update_fields: update_fields = [self.model._meta.get_field(name) for name in update_fields] on_conflict = self._check_bulk_create_options( ignore_conflicts, update_conflicts, update_fields, unique_fields, ) self._for_write = True fields = opts.concrete_fields objs = list(objs) self._prepare_for_bulk_create(objs) with transaction.atomic(using=self.db, savepoint=False): objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: returned_columns = self._batched_insert( objs_with_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) for obj_with_pk, results in zip(objs_with_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): if field != opts.pk: setattr(obj_with_pk, field.attname, result) for obj_with_pk in objs_with_pk: obj_with_pk._state.adding = False obj_with_pk._state.db = self.db if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] returned_columns = self._batched_insert( objs_without_pk, fields, batch_size, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) connection = connections[self.db] if ( connection.features.can_return_rows_from_bulk_insert and on_conflict is None ): assert len(returned_columns) == len(objs_without_pk) for obj_without_pk, results in zip(objs_without_pk, returned_columns): for result, field in zip(results, opts.db_returning_fields): setattr(obj_without_pk, field.attname, result) obj_without_pk._state.adding = False obj_without_pk._state.db = self.db return objs async def abulk_create( self, objs, batch_size=None, ignore_conflicts=False, update_conflicts=False, update_fields=None, unique_fields=None, ): return await sync_to_async(self.bulk_create)( objs=objs,
batch_size=batch_size, ignore_conflicts=ignore_conflicts, update_conflicts=update_conflicts, update_fields=update_fields, unique_fields=unique_fields, ) def bulk_update(self, objs, fields, batch_size=None): """ Update the given fields in each of the given objects in the database. """ if batch_size is not None and batch_size <= 0: raise ValueError("Batch size must be a positive integer.") if not fields: raise ValueError("Field names must be given to bulk_update().") objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError("All bulk_update() objects must have a primary key set.") fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError("bulk_update() can only be used with concrete fields.") if any(f.primary_key for f in fields): raise ValueError("bulk_update() cannot be used with primary key fields.") if not objs: return 0 for obj in objs: obj._prepare_related_fields_for_save( operation_name="bulk_update", fields=fields ) # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. self._for_write = True connection = connections[self.db] max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connection.features.requires_casted_case_in_updates batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not hasattr(attr, "resolve_expression"): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) rows_updated = 0 queryset = self.using(self.db) with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs) return rows_updated bulk_update.alters_data = True async def abulk_update(self, objs, fields, batch_size=None): return await sync_to_async(self.bulk_update)( objs=objs, fields=fields, batch_size=batch_size, ) abulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Try to create an object using passed params. 
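# If a concurrent writer inserts a matching row between the failed get()
# and the create() below, the INSERT violates a unique constraint and
# raises IntegrityError; in that case retry the get() and re-raise the
# original error only if the row still can't be found.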
try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) return self.create(**params), True except IntegrityError: try: return self.get(**kwargs), False except self.model.DoesNotExist: pass raise async def aget_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.get_or_create)( defaults=defaults, **kwargs, ) def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): # Lock the row so that a concurrent update is blocked until # update_or_create() has performed its save. obj, created = self.select_for_update().get_or_create(defaults, **kwargs) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) update_fields = set(defaults) concrete_field_names = self.model._meta._non_pk_concrete_field_names # update_fields does not support non-concrete fields. if concrete_field_names.issuperset(update_fields): # Add fields which are set on pre_save(), e.g. auto_now fields. # This is to maintain backward compatibility as these fields # are not updated unless explicitly specified in the # update_fields list. for field in self.model._meta.local_concrete_fields: if not ( field.primary_key or field.__class__.pre_save is Field.pre_save ): update_fields.add(field.name) if field.name != field.attname: update_fields.add(field.attname) obj.save(using=self.db, update_fields=update_fields) else: obj.save(using=self.db) return obj, False async def aupdate_or_create(self, defaults=None, **kwargs): return await sync_to_async(self.update_or_create)( defaults=defaults, **kwargs, ) def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create(). """ defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), ) ) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, "get_latest_by") if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." 
) obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self._earliest(*fields) async def aearliest(self, *fields): return await sync_to_async(self.earliest)(*fields) def latest(self, *fields): """ Return the latest object according to fields (if given) or by the model's Meta.get_latest_by. """ if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") return self.reverse()._earliest(*fields) async def alatest(self, *fields): return await sync_to_async(self.latest)(*fields) def first(self): """Return the first object of a query or None if no match is found.""" if self.ordered: queryset = self else: self._check_ordering_first_last_queryset_aggregation(method="first") queryset = self.order_by("pk") for obj in queryset[:1]: return obj async def afirst(self): return await sync_to_async(self.first)() def last(self): """Return the last object of a query or None if no match is found.""" if self.ordered: queryset = self.reverse() else: self._check_ordering_first_last_queryset_aggregation(method="last") queryset = self.order_by("-pk") for obj in queryset[:1]: return obj async def alast(self): return await sync_to_async(self.last)() def in_bulk(self, id_list=None, *, field_name="pk"): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. """ if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") opts = self.model._meta unique_fields = [ constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1 ] if ( field_name != "pk" and not opts.get_field(field_name).unique and field_name not in unique_fields and self.query.distinct_fields != (field_name,) ): raise ValueError( "in_bulk()'s field_name must be a unique field but %r isn't." % field_name ) if id_list is not None: if not id_list: return {} filter_key = "{}__in".format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. if batch_size and batch_size < len(id_list): qs = () for offset in range(0, len(id_list), batch_size): batch = id_list[offset : offset + batch_size] qs += tuple(self.filter(**{filter_key: batch}).order_by()) else: qs = self.filter(**{filter_key: id_list}).order_by() else: qs = self._chain() return {getattr(obj, field_name): obj for obj in qs} async def ain_bulk(self, id_list=None, *, field_name="pk"): return await sync_to_async(self.in_bulk)( id_list=id_list, field_name=field_name, ) def delete(self): """Delete the records in the current QuerySet.""" self._not_support_combined_queries("delete") if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with delete().") if self.query.distinct or self.query.distinct_fields: raise TypeError("Cannot call delete() after .distinct().") if self._fields is not None: raise TypeError("Cannot call delete() after .values() or .values_list()") del_query = self._chain() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. 
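# select_for_update(), select_related(), and any ordering have no meaning
# for the deletion queries that follow, so clear them before collecting
# related objects.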
del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering(force=True) collector = Collector(using=del_query.db, origin=self) collector.collect(del_query) deleted, _rows_count = collector.delete() # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None return deleted, _rows_count delete.alters_data = True delete.queryset_only = True async def adelete(self): return await sync_to_async(self.delete)() adelete.alters_data = True adelete.queryset_only = True def _raw_delete(self, using): """ Delete objects found from the given queryset in single direct SQL query. No signals are sent and there is no protection for cascades. """ query = self.query.clone() query.__class__ = sql.DeleteQuery cursor = query.get_compiler(using).execute_sql(CURSOR) if cursor: with cursor: return cursor.rowcount return 0 _raw_delete.alters_data = True def update(self, **kwargs): """ Update all elements in the current QuerySet, setting all the given fields to the appropriate values. """ self._not_support_combined_queries("update") if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") self._for_write = True query = self.query.chain(sql.UpdateQuery) query.add_update_values(kwargs) # Inline annotations in order_by(), if possible. new_order_by = [] for col in query.order_by: if annotation := query.annotations.get(col): if getattr(annotation, "contains_aggregate", False): raise exceptions.FieldError( f"Cannot update when ordering by an aggregate: {annotation}" ) new_order_by.append(annotation) else: new_order_by.append(col) query.order_by = tuple(new_order_by) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} with transaction.mark_for_rollback_on_error(using=self.db): rows = query.get_compiler(self.db).execute_sql(CURSOR) self._result_cache = None return rows update.alters_data = True async def aupdate(self, **kwargs): return await sync_to_async(self.update)(**kwargs) aupdate.alters_data = True def _update(self, values): """ A version of update() that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level). """ if self.query.is_sliced: raise TypeError("Cannot update a query once a slice has been taken.") query = self.query.chain(sql.UpdateQuery) query.add_update_fields(values) # Clear any annotations so that they won't be present in subqueries. query.annotations = {} self._result_cache = None return query.get_compiler(self.db).execute_sql(CURSOR) _update.alters_data = True _update.queryset_only = False def exists(self): """ Return True if the QuerySet would have any results, False otherwise. """ if self._result_cache is None: return self.query.has_results(using=self.db) return bool(self._result_cache) async def aexists(self): return await sync_to_async(self.exists)() def contains(self, obj): """ Return True if the QuerySet contains the provided obj, False otherwise. """ self._not_support_combined_queries("contains") if self._fields is not None: raise TypeError( "Cannot call QuerySet.contains() after .values() or .values_list()." 
) try: if obj._meta.concrete_model != self.model._meta.concrete_model: return False except AttributeError: raise TypeError("'obj' must be a model instance.") if obj.pk is None: raise ValueError("QuerySet.contains() cannot be used on unsaved objects.") if self._result_cache is not None: return obj in self._result_cache return self.filter(pk=obj.pk).exists() async def acontains(self, obj): return await sync_to_async(self.contains)(obj=obj) def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): """ Runs an EXPLAIN on the SQL query this QuerySet would perform, and returns the results. """ return self.query.explain(using=self.db, format=format, **options) async def aexplain(self, *, format=None, **options): return await sync_to_async(self.explain)(format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=(), translations=None, using=None): if using is None: using = self.db qs = RawQuerySet( raw_query, model=self.model, params=params, translations=translations, using=using, ) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError( "'flat' is not valid when values_list is called with more than one " "field." ) field_names = {f for f in fields if not hasattr(f, "resolve_expression")} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, "resolve_expression"): field_id_prefix = getattr( field, "default_alias", field.__class__.__name__.lower() ) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order="ASC"): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day"): raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") return ( self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name), ) .values_list("datefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datefield") ) # RemovedInDjango50Warning: when the deprecation ends, remove is_dst # argument. 
def datetimes( self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED ): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ if kind not in ("year", "month", "week", "day", "hour", "minute", "second"): raise ValueError( "'kind' must be one of 'year', 'month', 'week', 'day', " "'hour', 'minute', or 'second'." ) if order not in ("ASC", "DESC"): raise ValueError("'order' must be either 'ASC' or 'DESC'.") if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return ( self.annotate( datetimefield=Trunc( field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo, is_dst=is_dst, ), plain_field=F(field_name), ) .values_list("datetimefield", flat=True) .distinct() .filter(plain_field__isnull=False) .order_by(("-" if order == "DESC" else "") + "datetimefield") ) def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries("filter") return self._filter_or_exclude(False, args, kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries("exclude") return self._filter_or_exclude(True, args, kwargs) def _filter_or_exclude(self, negate, args, kwargs): if (args or kwargs) and self.query.is_sliced: raise TypeError("Cannot filter a query once a slice has been taken.") clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, args, kwargs) return clone def _filter_or_exclude_inplace(self, negate, args, kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, args=(), kwargs=filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(force=True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple( qs.query for qs in other_qs ) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. 
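# e.g. Model.objects.none().union(qs1, qs2) reduces to qs1.union(qs2):
# an EmptyQuerySet contributes no rows, so it is dropped rather than
# compiled into the combined SQL.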
if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] if not qs: return self if len(qs) == 1: return qs[0] return qs[0]._combinator_query("union", *qs[1:], all=all) return self._combinator_query("union", *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query("intersection", *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self return self._combinator_query("difference", *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError("The nowait option cannot be used with skip_locked.") obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of obj.query.select_for_no_key_update = no_key return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries("select_related") if self._fields is not None: raise TypeError( "Cannot call select_related() after .values() or .values_list()" ) obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries("prefetch_related") clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError( "prefetch_related() is not supported with FilteredRelation." ) clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries("annotate") return self._annotate(args, kwargs, select=True) def alias(self, *args, **kwargs): """ Return a query set with added aliases for extra data or aggregations. """ self._not_support_combined_queries("alias") return self._annotate(args, kwargs, select=False) def _annotate(self, args, kwargs, select=True): self._validate_values_are_expressions( args + tuple(kwargs.values()), method_name="annotate" ) annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError( "The named annotation '%s' conflicts with the " "default name for another annotation." 
% arg.default_alias ) except TypeError: raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) clone = self._chain() names = self._fields if names is None: names = set( chain.from_iterable( (field.name, field.attname) if hasattr(field, "attname") else (field.name,) for field in self.model._meta.get_fields() ) ) for alias, annotation in annotations.items(): if alias in names: raise ValueError( "The annotation '%s' conflicts with a field on " "the model." % alias ) if isinstance(annotation, FilteredRelation): clone.query.add_filtered_relation(annotation, alias) else: clone.query.add_annotation( annotation, alias, select=select, ) for alias, annotation in clone.query.annotations.items(): if alias in annotations and annotation.contains_aggregate: if clone._fields is None: clone.query.group_by = True else: clone.query.set_group_by() break return clone def order_by(self, *field_names): """Return a new QuerySet instance with the ordering changed.""" if self.query.is_sliced: raise TypeError("Cannot reorder a query once a slice has been taken.") obj = self._chain() obj.query.clear_ordering(force=True, clear_default=False) obj.query.add_ordering(*field_names) return obj def distinct(self, *field_names): """ Return a new QuerySet instance that will select only distinct results. """ self._not_support_combined_queries("distinct") if self.query.is_sliced: raise TypeError( "Cannot create distinct fields once a slice has been taken." ) obj = self._chain() obj.query.add_distinct_fields(*field_names) return obj def extra( self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None, ): """Add extra SQL fragments to the query.""" self._not_support_combined_queries("extra") if self.query.is_sliced: raise TypeError("Cannot change a query once a slice has been taken.") clone = self._chain() clone.query.add_extra(select, select_params, where, params, tables, order_by) return clone def reverse(self): """Reverse the ordering of the QuerySet.""" if self.query.is_sliced: raise TypeError("Cannot reverse a query once a slice has been taken.") clone = self._chain() clone.query.standard_ordering = not clone.query.standard_ordering return clone def defer(self, *fields): """ Defer the loading of data for certain fields until they are accessed. Add the set of deferred fields to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed. """ self._not_support_combined_queries("defer") if self._fields is not None: raise TypeError("Cannot call defer() after .values() or .values_list()") clone = self._chain() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone def only(self, *fields): """ Essentially, the opposite of defer(). Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated. """ self._not_support_combined_queries("only") if self._fields is not None: raise TypeError("Cannot call only() after .values() or .values_list()") if fields == (None,): # Can only pass None to defer(), not only(), as the reset option. # That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().") for field in fields: field = field.split(LOOKUP_SEP, 1)[0] if field in self.query._filtered_relations: raise ValueError("only() is not supported with FilteredRelation.") clone = self._chain() clone.query.add_immediate_loading(fields) return clone def using(self, alias): """Select which database this QuerySet should execute against.""" clone = self._chain() clone._db = alias return clone ################################### # PUBLIC INTROSPECTION ATTRIBUTES # ################################### @property def ordered(self): """ Return True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model (or is empty). """ if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif ( self.query.default_ordering and self.query.get_meta().ordering and # A default ordering doesn't affect GROUP BY queries. not self.query.group_by ): return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert( self, objs, fields, returning_fields=None, raw=False, using=None, on_conflict=None, update_fields=None, unique_fields=None, ): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery( self.model, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert( self, objs, fields, batch_size, on_conflict=None, update_fields=None, unique_fields=None, ): """ Helper method for bulk_create() to insert objs one batch at a time. """ connection = connections[self.db] ops = connection.ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connection.features.can_return_rows_from_bulk_insert for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and on_conflict is None: inserted_rows.extend( self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ) ) else: self._insert( item, fields=fields, using=self.db, on_conflict=on_conflict, update_fields=update_fields, unique_fields=unique_fields, ) return inserted_rows def _chain(self): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). 
""" c = self.__class__( model=self.model, query=self.query.chain(), using=self._db, hints=self._hints, ) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. """ self._sticky_filter = True return self def _merge_sanity_check(self, other): """Check that two QuerySet classes may be merged.""" if self._fields is not None and ( set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select) ): raise TypeError( "Merging '%s' classes must involve the same values in each case." % self.__class__.__name__ ) def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. """ for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def resolve_expression(self, *args, **kwargs): if self._fields and len(self._fields) > 1: # values() queryset can only be used as nested queries # if they are set up to select only a single field. raise TypeError("Cannot use multi-field values as a filter value.") query = self.query.resolve_expression(*args, **kwargs) query._db = self._db return query resolve_expression.queryset_only = True def _add_hints(self, **hints): """ Update hinting information for use by routers. Add new key/values or overwrite existing key/values. """ self._hints.update(hints) def _has_filters(self): """ Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False. """ return self.query.has_filters() @staticmethod def _validate_values_are_expressions(values, method_name): invalid_args = sorted( str(arg) for arg in values if not hasattr(arg, "resolve_expression") ) if invalid_args: raise TypeError( "QuerySet.%s() received non-expression(s): %s." % ( method_name, ", ".join(invalid_args), ) ) def _not_support_combined_queries(self, operation_name): if self.query.combinator: raise NotSupportedError( "Calling QuerySet.%s() after %s() is not supported." 
% (operation_name, self.query.combinator) ) def _check_operator_queryset(self, other, operator_): if self.query.combinator or other.query.combinator: raise TypeError(f"Cannot use {operator_} operator with combined queryset.") def _check_ordering_first_last_queryset_aggregation(self, method): if isinstance(self.query.group_by, tuple) and not any( col.output_field is self.model._meta.pk for col in self.query.group_by ): raise TypeError( f"Cannot use QuerySet.{method}() on an unordered queryset performing " f"aggregation. Add an ordering with order_by()." ) class InstanceCheckMeta(type): def __instancecheck__(self, instance): return isinstance(instance, QuerySet) and instance.query.is_empty() class EmptyQuerySet(metaclass=InstanceCheckMeta): """ Marker class for checking whether a queryset is empty via .none(): isinstance(qs.none(), EmptyQuerySet) -> True """ def __init__(self, *args, **kwargs): raise TypeError("EmptyQuerySet can't be instantiated") class RawQuerySet: """ Provide an iterator that converts the results of raw SQL queries into annotated model instances. """ def __init__( self, raw_query, model=None, query=None, params=(), translations=None, using=None, hints=None, ): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [ f for f in self.model._meta.fields if converter(f.column) in self.columns ] annotation_fields = [ (column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields ] model_init_order = [ self.columns.index(converter(f.column)) for f in model_init_fields ] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints, ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def __aiter__(self): # Remember, __aiter__ itself is synchronous; it's the thing it returns # that is async!
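# This mirrors QuerySet.__aiter__(): populate the result cache in a
# worker thread via sync_to_async(), then yield the cached rows from a
# plain async generator.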
async def generator(): await sync_to_async(self._fetch_all)() for item in self._result_cache: yield item return generator() def iterator(self): yield from RawModelIterable(self) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for query_name, model_name in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, "_iterable_class") and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( "Prefetch querysets cannot use raw(), values(), and values_list()." 
) if to_attr: self.prefetch_to = LOOKUP_SEP.join( lookup.split(LOOKUP_SEP)[:-1] + [to_attr] ) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: queryset = self.queryset._chain() # Prevent the QuerySet from being evaluated queryset._result_cache = [] queryset._prefetch_done = True obj_dict["queryset"] = queryset return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. """ if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError( "'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to ) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, "_prefetched_objects_cache"): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. 
good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher( first_obj, through_attr, to_attr ) if not attr_found: raise AttributeError( "Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % ( through_attr, first_obj.__class__.__name__, lookup.prefetch_through, ) ) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError( "'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through ) obj_to_fetch = None if prefetcher is not None: obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] if obj_to_fetch: obj_list, additional_lookups = prefetch_one_level( obj_to_fetch, prefetcher, lookup, level, ) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. if not ( prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors ): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups( reversed(additional_lookups), prefetch_to ) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, "_prefetched_objects_cache", ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). 
Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a function that takes an instance and returns a boolean that is True if the attribute has already been fetched for that instance) """ def has_to_attr_attribute(instance): return hasattr(instance, to_attr) prefetcher = None is_fetched = has_to_attr_attribute # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, "get_prefetch_queryset"): prefetcher = rel_obj_descriptor is_fetched = rel_obj_descriptor.is_cached else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, "get_prefetch_queryset"): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance( getattr(instance.__class__, to_attr, None), cached_property ): def has_cached_property(instance): return to_attr in instance.__dict__ is_fetched = has_cached_property else: def in_prefetched_cache(instance): return through_attr in instance._prefetched_objects_cache is_fetched = in_prefetched_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. ( rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor, ) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). 
additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = "to_attr={} conflicts with a field on the {} model." raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. 
The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. select_fields = klass_info["select_fields"] from_parent = klass_info["from_parent"] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start : self.cols_end] ] self.reorder_for_init = None else: attname_indexes = { select[idx][0].target.attname: idx for idx in select_fields } model_init_attnames = ( f.attname for f in klass_info["model"]._meta.concrete_fields ) self.init_list = [ attname for attname in model_init_attnames if attname in attname_indexes ] self.reorder_for_init = operator.itemgetter( *[attname_indexes[attname] for attname in self.init_list] ) self.model_cls = klass_info["model"] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info["local_setter"] self.remote_setter = klass_info["remote_setter"] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start : self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get("related_klass_infos", []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
castiel248/Convert
Lib/site-packages/django/db/models/query.py
Python
mit
101,762
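# A minimal usage sketch of the Prefetch / prefetch_related_objects machinery
# defined in query.py above. The `Author` and `Book` models and the
# `published` field are illustrative assumptions, not part of the source.
from django.db import models
from django.db.models import Prefetch, prefetch_related_objects

class Author(models.Model):
    name = models.CharField(max_length=100)

class Book(models.Model):
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    published = models.DateField()

def attach_books_newest_first(authors):
    # `authors` is a concrete list of Author instances. One extra query
    # populates every author at once; `to_attr` makes prefetch_one_level()
    # setattr() a plain list on each instance instead of stashing a queryset
    # in _prefetched_objects_cache under the manager's cache name.
    prefetch_related_objects(
        authors,
        Prefetch(
            "book_set",
            queryset=Book.objects.order_by("-published"),
            to_attr="books_newest_first",
        ),
    )
    return authors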
""" Various data structures used in query construction. Factored out from django.db.models.query to avoid making the main module very large and/or so that they can be used by other modules without getting into circular import difficulties. """ import functools import inspect import logging from collections import namedtuple from django.core.exceptions import FieldError from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections from django.db.models.constants import LOOKUP_SEP from django.utils import tree logger = logging.getLogger("django.db.models") # PathInfo is used when converting lookups (fk__somecol). The contents # describe the relation in Model terms (model Options and Fields for both # sides of the relation. The join_field is the field backing the relation. PathInfo = namedtuple( "PathInfo", "from_opts to_opts target_fields join_field m2m direct filtered_relation", ) def subclasses(cls): yield cls for subclass in cls.__subclasses__(): yield from subclasses(subclass) class Q(tree.Node): """ Encapsulate filters as objects that can then be combined logically (using `&` and `|`). """ # Connection types AND = "AND" OR = "OR" XOR = "XOR" default = AND conditional = True def __init__(self, *args, _connector=None, _negated=False, **kwargs): super().__init__( children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated, ) def _combine(self, other, conn): if getattr(other, "conditional", False) is False: raise TypeError(other) if not self: return other.copy() if not other and isinstance(other, Q): return self.copy() obj = self.create(connector=conn) obj.add(self, conn) obj.add(other, conn) return obj def __or__(self, other): return self._combine(other, self.OR) def __and__(self, other): return self._combine(other, self.AND) def __xor__(self, other): return self._combine(other, self.XOR) def __invert__(self): obj = self.copy() obj.negate() return obj def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False ): # We must promote any new joins to left outer joins so that when Q is # used as an expression, rows aren't filtered due to joins. clause, joins = query._add_q( self, reuse, allow_joins=allow_joins, split_subq=False, check_filterable=False, summarize=summarize, ) query.promote_joins(joins) return clause def flatten(self): """ Recursively yield this Q object and all subexpressions, in depth-first order. """ yield self for child in self.children: if isinstance(child, tuple): # Use the lookup. child = child[1] if hasattr(child, "flatten"): yield from child.flatten() else: yield child def check(self, against, using=DEFAULT_DB_ALIAS): """ Do a database query to check if the expressions of the Q instance matches against the expressions. """ # Avoid circular imports. from django.db.models import BooleanField, Value from django.db.models.functions import Coalesce from django.db.models.sql import Query from django.db.models.sql.constants import SINGLE query = Query(None) for name, value in against.items(): if not hasattr(value, "resolve_expression"): value = Value(value) query.add_annotation(value, name, select=False) query.add_annotation(Value(1), "_check") # This will raise a FieldError if a field is missing in "against". 
if connections[using].features.supports_comparing_boolean_expr: query.add_q(Q(Coalesce(self, True, output_field=BooleanField()))) else: query.add_q(self) compiler = query.get_compiler(using=using) try: return compiler.execute_sql(SINGLE) is not None except DatabaseError as e: logger.warning("Got a database error calling check() on %r: %s", self, e) return True def deconstruct(self): path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__) if path.startswith("django.db.models.query_utils"): path = path.replace("django.db.models.query_utils", "django.db.models") args = tuple(self.children) kwargs = {} if self.connector != self.default: kwargs["_connector"] = self.connector if self.negated: kwargs["_negated"] = True return path, args, kwargs class DeferredAttribute: """ A wrapper for a deferred-loading field. When the value is read from this object the first time, the query is executed. """ def __init__(self, field): self.field = field def __get__(self, instance, cls=None): """ Retrieve and cache the value from the datastore on the first lookup. Return the cached value. """ if instance is None: return self data = instance.__dict__ field_name = self.field.attname if field_name not in data: # Let's see if the field is part of the parent chain. If so we # might be able to reuse the already loaded value. Refs #18343. val = self._check_parent_chain(instance) if val is None: instance.refresh_from_db(fields=[field_name]) else: data[field_name] = val return data[field_name] def _check_parent_chain(self, instance): """ Check if the field value can be fetched from a parent field already loaded in the instance. This can be done if the to-be fetched field is a primary key field. """ opts = instance._meta link_field = opts.get_ancestor_link(self.field.model) if self.field.primary_key and self.field != link_field: return getattr(instance, link_field.attname) return None class class_or_instance_method: """ Hook used in RegisterLookupMixin to return partial functions depending on the caller type (instance or class of models.Field).
""" def __init__(self, class_method, instance_method): self.class_method = class_method self.instance_method = instance_method def __get__(self, instance, owner): if instance is None: return functools.partial(self.class_method, owner) return functools.partial(self.instance_method, instance) class RegisterLookupMixin: def _get_lookup(self, lookup_name): return self.get_lookups().get(lookup_name, None) @functools.lru_cache(maxsize=None) def get_class_lookups(cls): class_lookups = [ parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls) ] return cls.merge_dicts(class_lookups) def get_instance_lookups(self): class_lookups = self.get_class_lookups() if instance_lookups := getattr(self, "instance_lookups", None): return {**class_lookups, **instance_lookups} return class_lookups get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups) get_class_lookups = classmethod(get_class_lookups) def get_lookup(self, lookup_name): from django.db.models.lookups import Lookup found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_lookup(lookup_name) if found is not None and not issubclass(found, Lookup): return None return found def get_transform(self, lookup_name): from django.db.models.lookups import Transform found = self._get_lookup(lookup_name) if found is None and hasattr(self, "output_field"): return self.output_field.get_transform(lookup_name) if found is not None and not issubclass(found, Transform): return None return found @staticmethod def merge_dicts(dicts): """ Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. """ merged = {} for d in reversed(dicts): merged.update(d) return merged @classmethod def _clear_cached_class_lookups(cls): for subclass in subclasses(cls): subclass.get_class_lookups.cache_clear() def register_class_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "class_lookups" not in cls.__dict__: cls.class_lookups = {} cls.class_lookups[lookup_name] = lookup cls._clear_cached_class_lookups() return lookup def register_instance_lookup(self, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if "instance_lookups" not in self.__dict__: self.instance_lookups = {} self.instance_lookups[lookup_name] = lookup return lookup register_lookup = class_or_instance_method( register_class_lookup, register_instance_lookup ) register_class_lookup = classmethod(register_class_lookup) def _unregister_class_lookup(cls, lookup, lookup_name=None): """ Remove given lookup from cls lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del cls.class_lookups[lookup_name] cls._clear_cached_class_lookups() def _unregister_instance_lookup(self, lookup, lookup_name=None): """ Remove given lookup from instance lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del self.instance_lookups[lookup_name] _unregister_lookup = class_or_instance_method( _unregister_class_lookup, _unregister_instance_lookup ) _unregister_class_lookup = classmethod(_unregister_class_lookup) def select_related_descend(field, restricted, requested, select_mask, reverse=False): """ Return True if this field should be used to descend deeper for select_related() purposes. 
Used by both the query construction code (compiler.get_related_selections()) and the model instance creation code (compiler.klass_info). Arguments: * field - the field to be checked * restricted - a boolean indicating if the field list has been manually restricted using a requested clause * requested - The select_related() dictionary. * select_mask - the dictionary of selected fields. * reverse - boolean, True if we are checking a reverse select related """ if not field.remote_field: return False if field.remote_field.parent_link and not reverse: return False if restricted: if reverse and field.related_query_name() not in requested: return False if not reverse and field.name not in requested: return False if not restricted and field.null: return False if ( restricted and select_mask and field.name in requested and field not in select_mask ): raise FieldError( f"Field {field.model._meta.object_name}.{field.name} cannot be both " "deferred and traversed using select_related at the same time." ) return True def refs_expression(lookup_parts, annotations): """ Check if the lookup_parts contains references to the given annotations set. Because the LOOKUP_SEP is contained in the default annotation names, check each prefix of the lookup_parts for a match. """ for n in range(1, len(lookup_parts) + 1): level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n]) if annotations.get(level_n_lookup): return level_n_lookup, lookup_parts[n:] return None, () def check_rel_lookup_compatibility(model, target_opts, field): """ Check that self.model is compatible with target_opts. Compatibility is OK if: 1) model and opts match (where proxy inheritance is removed) 2) model is parent of opts' model or the other way around """ def check(opts): return ( model._meta.concrete_model == opts.concrete_model or opts.concrete_model in model._meta.get_parent_list() or model in opts.get_parent_list() ) # If the field is a primary key, then doing a query against the field's # model is ok, too. Consider the case: # class Restaurant(models.Model): # place = OneToOneField(Place, primary_key=True) # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible # with that. This logic applies only to primary keys, as when doing __in=qs, # we are going to turn this into __in=qs.values('pk') later on. return check(target_opts) or ( getattr(field, "primary_key", False) and check(field.model._meta) ) class FilteredRelation: """Specify custom filtering in the ON clause of SQL joins.""" def __init__(self, relation_name, *, condition=Q()): if not relation_name: raise ValueError("relation_name cannot be empty.") self.relation_name = relation_name self.alias = None if not isinstance(condition, Q): raise ValueError("condition argument must be a Q() instance.") self.condition = condition self.path = [] def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.relation_name == other.relation_name and self.alias == other.alias and self.condition == other.condition ) def clone(self): clone = FilteredRelation(self.relation_name, condition=self.condition) clone.alias = self.alias clone.path = self.path[:] return clone def resolve_expression(self, *args, **kwargs): """ QuerySet.annotate() only accepts expression-like arguments (with a resolve_expression() method).
""" raise NotImplementedError("FilteredRelation.resolve_expression() is unused.") def as_sql(self, compiler, connection): # Resolve the condition in Join.filtered_relation. query = compiler.query where = query.build_filtered_relation_q(self.condition, reuse=set(self.path)) return compiler.compile(where)
castiel248/Convert
Lib/site-packages/django/db/models/query_utils.py
Python
mit
15,244
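# A minimal sketch of the RegisterLookupMixin API defined in query_utils.py
# above: registering a custom lookup on a field class. `NotEqual` and the
# example filter are illustrative assumptions following the standard Lookup
# contract (lookup_name plus as_sql()).
from django.db.models import CharField, Lookup

class NotEqual(Lookup):
    lookup_name = "ne"

    def as_sql(self, compiler, connection):
        # Compile both sides, then join them with the SQL inequality operator.
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        return "%s <> %s" % (lhs, rhs), lhs_params + rhs_params

# register_lookup is the class_or_instance_method defined above: called on a
# class it stores the lookup in CharField.class_lookups and clears the
# get_class_lookups() cache, so a filter such as
#     SomeModel.objects.filter(name__ne="x")
# now compiles to ... WHERE "name" <> 'x'.
CharField.register_lookup(NotEqual)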
from functools import partial from django.db.models.utils import make_model_tuple from django.dispatch import Signal class_prepared = Signal() class ModelSignal(Signal): """ Signal subclass that allows the sender to be lazily specified as a string of the `app_label.ModelName` form. """ def _lazy_method(self, method, apps, receiver, sender, **kwargs): from django.db.models.options import Options # This partial takes a single optional argument named "sender". partial_method = partial(method, receiver, **kwargs) if isinstance(sender, str): apps = apps or Options.default_apps apps.lazy_model_operation(partial_method, make_model_tuple(sender)) else: return partial_method(sender) def connect(self, receiver, sender=None, weak=True, dispatch_uid=None, apps=None): self._lazy_method( super().connect, apps, receiver, sender, weak=weak, dispatch_uid=dispatch_uid, ) def disconnect(self, receiver=None, sender=None, dispatch_uid=None, apps=None): return self._lazy_method( super().disconnect, apps, receiver, sender, dispatch_uid=dispatch_uid ) pre_init = ModelSignal(use_caching=True) post_init = ModelSignal(use_caching=True) pre_save = ModelSignal(use_caching=True) post_save = ModelSignal(use_caching=True) pre_delete = ModelSignal(use_caching=True) post_delete = ModelSignal(use_caching=True) m2m_changed = ModelSignal(use_caching=True) pre_migrate = Signal() post_migrate = Signal()
castiel248/Convert
Lib/site-packages/django/db/models/signals.py
Python
mit
1,622
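# A minimal sketch of the lazy string sender supported by ModelSignal in
# signals.py above: "library.Book" is a hypothetical app_label.ModelName
# pair; the connection is deferred via apps.lazy_model_operation() until
# that model class has been prepared.
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender="library.Book")
def on_book_saved(sender, instance, created, **kwargs):
    # Runs after every Book.save(); `created` is True on the initial INSERT.
    if created:
        print(f"New book saved: pk={instance.pk}")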
from django.db.models.sql.query import * # NOQA from django.db.models.sql.query import Query from django.db.models.sql.subqueries import * # NOQA from django.db.models.sql.where import AND, OR, XOR __all__ = ["Query", "AND", "OR", "XOR"]
castiel248/Convert
Lib/site-packages/django/db/models/sql/__init__.py
Python
mit
241
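# A minimal sketch tying the Query class re-exported in sql/__init__.py above
# to the SQLCompiler defined in compiler.py below: every QuerySet carries a
# Query instance on `.query`, and the compiler turns it into SQL. The
# queryset argument is any QuerySet; "default" is the usual database alias.
def show_sql(queryset, using="default"):
    # get_compiler() returns the backend-specific SQLCompiler; as_sql()
    # yields the SQL string (with %s-style placeholders) plus its parameters.
    compiler = queryset.query.get_compiler(using=using)
    return compiler.as_sql()

# e.g. sql, params = show_sql(SomeModel.objects.filter(pk__gt=10))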
import collections import json import re from functools import partial from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet from django.db import DatabaseError, NotSupportedError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value from django.db.models.functions import Cast, Random from django.db.models.lookups import Lookup from django.db.models.query_utils import select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.models.sql.where import AND from django.db.transaction import TransactionManagementError from django.utils.functional import cached_property from django.utils.hashable import make_hashable from django.utils.regex_helper import _lazy_re_compile class PositionRef(Ref): def __init__(self, ordinal, refs, source): self.ordinal = ordinal super().__init__(refs, source) def as_sql(self, compiler, connection): return str(self.ordinal), () class SQLCompiler: # Multiline ordering SQL clause may appear from RawSQL. ordering_parts = _lazy_re_compile( r"^(.*)\s(?:ASC|DESC).*", re.MULTILINE | re.DOTALL, ) def __init__(self, query, connection, using, elide_empty=True): self.query = query self.connection = connection self.using = using # Some queries, e.g. coalesced aggregation, need to be executed even if # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {"*": "*"} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self._meta_ordering = None def __repr__(self): return ( f"<{self.__class__.__qualname__} " f"model={self.query.model.__qualname__} " f"connection={self.connection!r} using={self.using!r}>" ) def setup_query(self, with_col_aliases=False): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select( with_col_aliases=with_col_aliases, ) self.col_count = len(self.select) def pre_sql_setup(self, with_col_aliases=False): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query(with_col_aliases=with_col_aliases) order_by = self.get_order_by() self.where, self.having, self.qualify = self.query.where.split_having_qualify( must_group_by=self.query.group_by is not None ) extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". 
""" # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk') # .annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] group_by_refs = set() if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, "as_sql"): expr = self.query.resolve_ref(expr) if isinstance(expr, Ref): if expr.refs not in group_by_refs: group_by_refs.add(expr.refs) expressions.append(expr.source) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. selected_expr_indices = {} for index, (expr, _, alias) in enumerate(select, start=1): if alias: selected_expr_indices[expr] = index # Skip members of the select clause that are already explicitly # grouped against. if alias in group_by_refs: continue expressions.extend(expr.get_group_by_cols()) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. 
if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) allows_group_by_select_index = ( self.connection.features.allows_group_by_select_index ) for expr in expressions: try: sql, params = self.compile(expr) except (EmptyResultSet, FullResultSet): continue if ( allows_group_by_select_index and (select_index := selected_expr_indices.get(expr)) is not None ): sql, params = str(select_index), () else: sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the database supports group by functional dependence reduction, # then the expressions can be reduced to the set of selected table # primary keys as all other columns are functionally dependent on them. if self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. # Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if ( hasattr(expr, "target") and expr.target.primary_key and self.connection.features.allows_group_by_selected_pks_on_model( expr.target.model ) ) } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or expr in having or getattr(expr, "alias", None) not in aliases ] return expressions def get_select(self, with_col_aliases=False): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) select_mask = self.query.get_select_mask() if self.query.default_cols: cols = self.get_default_columns(select_mask) else: # self.query.select is a special case. These columns never go to # any model. 
cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { "model": self.query.model, "select_fields": select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select, select_mask) klass_info["related_klass_infos"] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info["related_klass_infos"]: if ki["from_parent"]: ki["select_fields"] = ( klass_info["select_fields"] + ki["select_fields"] ) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] col_idx = 1 for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: empty_result_set_value = getattr( col, "empty_result_set_value", NotImplemented ) if empty_result_set_value is NotImplemented: # Select a predicate that's always False. sql, params = "0", () else: sql, params = self.compile(Value(empty_result_set_value)) except FullResultSet: sql, params = self.compile(Value(True)) else: sql, params = col.select_format(self, sql, params) if alias is None and with_col_aliases: alias = f"col{col_idx}" col_idx += 1 ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif (meta := self.query.get_meta()) and meta.ordering: ordering = meta.ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR["ASC"] else: default_order, _ = ORDER_DIR["DESC"] selected_exprs = {} # Avoid computing `selected_exprs` if there is no `ordering` as it's # relatively expensive. if ordering and (select := self.select): for ordinal, (expr, _, alias) in enumerate(select, start=1): pos_expr = PositionRef(ordinal, alias, expr) if alias: selected_exprs[alias] = pos_expr selected_exprs[expr] = pos_expr for field in ordering: if hasattr(field, "resolve_expression"): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() select_ref = selected_exprs.get(field.expression) if select_ref or ( isinstance(field.expression, F) and (select_ref := selected_exprs.get(field.expression.name)) ): # Emulation of NULLS (FIRST|LAST) cannot be combined with # the usage of ordering by position. if ( field.nulls_first is None and field.nulls_last is None ) or self.connection.features.supports_order_by_nulls_modifier: field = field.copy() field.expression = select_ref # Alias collisions are not possible when dealing with # combined queries so fallback to it if emulation of NULLS # handling is required. 
elif self.query.combinator: field = field.copy() field.expression = Ref(select_ref.refs, select_ref.source) yield field, select_ref is not None continue if field == "?": # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == "DESC" if select_ref := selected_exprs.get(col): # Reference to expression in SELECT clause yield ( OrderBy( select_ref, descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if "." in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split(".", 1) yield ( OrderBy( RawSQL( "%s.%s" % (self.quote_name_unless_alias(table), col), [] ), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy( Ref(col, RawSQL(*self.query.extra[col])), descending=descending, ), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if not is_ref and self.query.combinator and self.select: src = resolved.expression expr_src = expr.expression for sel_expr, _, col_alias in self.select: if src == sel_expr: # When values() is used the exact alias must be used to # reference annotations. if ( self.query.has_select_fields and col_alias in self.query.annotation_select and not ( isinstance(expr_src, F) and col_alias == expr_src.name ) ): continue resolved.set_source_expressions( [Ref(col_alias if col_alias else src.target.column, src)] ) break else: # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_alias = f"__orderbycol{order_by_idx}" for q in self.query.combined_queries: # If fields were explicitly selected through values() # combined queries cannot be augmented. if q.has_select_fields: raise DatabaseError( "ORDER BY term does not match any column in " "the result set." ) q.add_annotation(expr_src, col_alias) self.query.add_select_col(resolved, col_alias) resolved.set_source_expressions([Ref(col_alias, src)]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. 
When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ( (name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map ) ): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, "as_" + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries ] if not features.supports_slicing_ordering_in_compound: for compiler in compilers: if compiler.query.is_sliced: raise DatabaseError( "LIMIT/OFFSET not allowed in subqueries of compound statements." ) if compiler.get_order_by(): raise DatabaseError( "ORDER BY not allowed in subqueries of compound statements." ) elif self.query.is_sliced and combinator == "union": for compiler in compilers: # A sliced union cannot have its parts elided as some of them # might be sliced as well and in the event where only a single # part produces a non-empty resultset it might be impossible to # generate valid SQL. compiler.elide_empty = False parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values( ( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, ) ) part_sql, part_args = compiler.as_sql(with_col_aliases=True) if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = "SELECT * FROM ({})".format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. 
elif ( self.query.subquery or not features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) elif ( self.query.subquery and features.supports_slicing_ordering_in_compound ): part_sql = "({})".format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == "union" or (combinator == "difference" and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == "union": combinator_sql += " ALL" braces = "{}" if not self.query.subquery and features.supports_slicing_ordering_in_compound: braces = "({})" sql_parts, args_parts = zip( *((braces.format(sql), args) for sql, args in parts) ) result = [" {} ".format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def get_qualify_sql(self): where_parts = [] if self.where: where_parts.append(self.where) if self.having: where_parts.append(self.having) inner_query = self.query.clone() inner_query.subquery = True inner_query.where = inner_query.where.__class__(where_parts) # Augment the inner query with any window function references that # might have been masked via values() and alias(). If any masked # aliases are added they'll be masked again to avoid fetching # the data in the `if qual_aliases` branch below. select = { expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0] } select_aliases = set(select.values()) qual_aliases = set() replacements = {} def collect_replacements(expressions): while expressions: expr = expressions.pop() if expr in replacements: continue elif select_alias := select.get(expr): replacements[expr] = select_alias elif isinstance(expr, Lookup): expressions.extend(expr.get_source_expressions()) elif isinstance(expr, Ref): if expr.refs not in select_aliases: expressions.extend(expr.get_source_expressions()) else: num_qual_alias = len(qual_aliases) select_alias = f"qual{num_qual_alias}" qual_aliases.add(select_alias) inner_query.add_annotation(expr, select_alias) replacements[expr] = select_alias collect_replacements(list(self.qualify.leaves())) self.qualify = self.qualify.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) order_by = [] for order_by_expr, *_ in self.get_order_by(): collect_replacements(order_by_expr.get_source_expressions()) order_by.append( order_by_expr.replace_expressions( {expr: Ref(alias, expr) for expr, alias in replacements.items()} ) ) inner_query_compiler = inner_query.get_compiler( self.using, connection=self.connection, elide_empty=self.elide_empty ) inner_sql, inner_params = inner_query_compiler.as_sql( # The limits must be applied to the outer query to avoid pruning # results too eagerly. with_limits=False, # Force unique aliasing of selected columns to avoid collisions # and make rhs predicates referencing easier. with_col_aliases=True, ) qualify_sql, qualify_params = self.compile(self.qualify) result = [ "SELECT * FROM (", inner_sql, ")", self.connection.ops.quote_name("qualify"), "WHERE", qualify_sql, ] if qual_aliases: # If some select aliases were unmasked for filtering purposes they # must be masked back. 
cols = [self.connection.ops.quote_name(alias) for alias in select.values()] result = [ "SELECT", ", ".join(cols), "FROM (", *result, ")", self.connection.ops.quote_name("qualify_mask"), ] params = list(inner_params) + qualify_params # As the SQL spec is unclear on whether or not derived tables # ordering must propagate it has to be explicitly repeated on the # outer-most query to ensure it's preserved. if order_by: ordering_sqls = [] for ordering in order_by: ordering_sql, ordering_params = self.compile(ordering) ordering_sqls.append(ordering_sql) params.extend(ordering_params) result.extend(["ORDER BY", ", ".join(ordering_sqls)]) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: combinator = self.query.combinator extra_select, order_by, group_by = self.pre_sql_setup( with_col_aliases=with_col_aliases or bool(combinator), ) for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and self.query.is_sliced combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, "supports_select_{}".format(combinator)): raise NotSupportedError( "{} is not supported on this database backend.".format( combinator ) ) result, params = self.get_combinator_sql( combinator, self.query.combinator_all ) elif self.qualify: result, params = self.get_qualify_sql() order_by = None else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = ( self.compile(self.where) if self.where is not None else ("", []) ) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. where, w_params = "0 = 1", [] except FullResultSet: where, w_params = "", [] try: having, h_params = ( self.compile(self.having) if self.having is not None else ("", []) ) except FullResultSet: having, h_params = "", [] result = ["SELECT"] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = "%s AS %s" % ( s_sql, self.connection.ops.quote_name(alias), ) params.extend(s_params) out_cols.append(s_sql) result += [", ".join(out_cols)] if from_: result += ["FROM", *from_] elif self.connection.features.bare_select_suffix: result += [self.connection.features.bare_select_suffix] params.extend(f_params) if self.query.select_for_update and features.has_select_for_update: if ( self.connection.get_autocommit() # Don't raise an exception when database doesn't # support transactions, as it's a noop. and features.supports_transactions ): raise TransactionManagementError( "select_for_update cannot be used outside of a transaction." ) if ( with_limit_offset and not features.supports_select_for_update_with_limit ): raise NotSupportedError( "LIMIT/OFFSET is not supported with " "select_for_update on this database backend." 
) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not features.has_select_for_update_nowait: raise NotSupportedError( "NOWAIT is not supported on this database backend." ) elif skip_locked and not features.has_select_for_update_skip_locked: raise NotSupportedError( "SKIP LOCKED is not supported on this database backend." ) elif of and not features.has_select_for_update_of: raise NotSupportedError( "FOR UPDATE OF is not supported on this database backend." ) elif no_key and not features.has_select_for_no_key_update: raise NotSupportedError( "FOR NO KEY UPDATE is not supported on this " "database backend." ) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and features.for_update_after_from: result.append(for_update_part) if where: result.append("WHERE %s" % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError( "annotate() + distinct(fields) is not implemented." ) order_by = order_by or self.connection.ops.force_no_ordering() result.append("GROUP BY %s" % ", ".join(grouping)) if self._meta_ordering: order_by = None if having: result.append("HAVING %s" % having) params.extend(h_params) if self.query.explain_info: result.insert( 0, self.connection.ops.explain_query_prefix( self.query.explain_info.format, **self.query.explain_info.options, ), ) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) order_by_sql = "ORDER BY %s" % ", ".join(ordering) if combinator and features.requires_compound_order_by_subquery: result = ["SELECT * FROM (", *result, ")", order_by_sql] else: result.append(order_by_sql) if with_limit_offset: result.append( self.connection.ops.limit_offset_sql( self.query.low_mark, self.query.high_mark ) ) if for_update_part and not features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if alias: sub_selects.append( "%s.%s" % ( self.connection.ops.quote_name("subquery"), self.connection.ops.quote_name(alias), ) ) else: select_clone = select.relabeled_clone( {select.alias: "subquery"} ) subselect, subparams = select_clone.as_sql( self, self.connection ) sub_selects.append(subselect) sub_params.extend(subparams) return "SELECT %s FROM (%s) subquery" % ( ", ".join(sub_selects), " ".join(result), ), tuple(sub_params + params) return " ".join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. 
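# reset_refcounts() restores the alias reference counts snapshotted in
# refcounts_before at the top of as_sql(), so compiling the same Query
# repeatedly does not accumulate stale join references.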
self.query.reset_refcounts(refcounts_before) def get_default_columns( self, select_mask, start_alias=None, opts=None, from_parent=None ): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: if (opts := self.query.get_meta()) is None: return result start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. We # will assign None if the field belongs to this model. if model == opts.model: model = None if ( from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model ) ): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if select_mask and field not in select_mask: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins( parts, opts, None ) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(self.connection.ops.quote_name(name)) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name( self, name, opts, alias=None, default_order="ASC", already_seen=None ): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == "DESC" pieces = name.split(LOOKUP_SEP) ( field, targets, alias, joins, path, opts, transform_function, ) = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified or # there are transforms to process. if ( field.is_relation and opts.ordering and getattr(field, "attname", None) != pieces[-1] and name != "pk" and not getattr(transform_function, "has_transforms", False) ): # Firstly, avoid infinite loops. 
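# A cycle occurs when related models' Meta.ordering point back at each
# other through the same relation; the tuple of join columns uniquely
# identifies the path travelled, so seeing it twice means the ordering
# can never terminate and FieldError is raised below.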
already_seen = already_seen or set() join_tuple = tuple( getattr(self.query.alias_map[j], "join_cols", None) for j in joins ) if join_tuple in already_seen: raise FieldError("Infinite loop caused by ordering.") already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, "resolve_expression") and not isinstance( item, OrderBy ): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append( (item.prefix_references(f"{name}{LOOKUP_SEP}"), False) ) continue results.extend( (expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref) for expr, is_ref in self.find_ordering_name( item, opts, alias, order, already_seen ) ) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [ (OrderBy(transform_function(t, alias), descending=descending), False) for t in targets ] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. """ alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins( pieces, opts, alias ) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if ( alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1 ): result.append(", %s" % self.quote_name_unless_alias(alias)) return result, params def get_related_selections( self, select, select_mask, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None, ): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain( direct_choices, reverse_choices, self.query._filtered_relations ) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. 
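# The depth cap (Query.max_depth, 5) only applies to an unrestricted
# select_related(); an explicit field list is exempt because it cannot
# recurse indefinitely.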
return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info["related_klass_infos"] = related_klass_infos for f in opts.fields: fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. " "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or "(none)", ) ) else: next = False if not select_related_descend(f, restricted, requested, select_mask): continue related_select_mask = select_mask.get(f) or {} klass_info = { "model": f.remote_field.model, "field": f, "reverse": False, "local_setter": f.set_cached_value, "remote_setter": f.remote_field.set_cached_value if f.unique else lambda x, y: None, "from_parent": False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=f.remote_field.model._meta ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_klass_infos = self.get_related_selections( select, related_select_mask, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for related_field, model in related_fields: related_select_mask = select_mask.get(related_field) or {} if not select_related_descend( related_field, restricted, requested, related_select_mask, reverse=True, ): continue related_field_name = related_field.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins( [related_field_name], opts, root_alias ) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { "model": model, "field": related_field, "reverse": True, "local_setter": related_field.remote_field.set_cached_value, "remote_setter": related_field.set_cached_value, "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( related_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next = requested.get(related_field.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, related_select_mask, model._meta, alias, cur_depth + 1, next, restricted, ) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(final_field, obj, from_obj): # Set a reverse fk object when relation is non-empty. 
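# Per the comment above: when the relation is empty for this row,
# from_obj is None and no cached value is written on it.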
if from_obj: final_field.remote_field.set_cached_value(from_obj, obj) def local_setter_noop(obj, from_obj): pass def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) final_field, _, join_opts, joins, _, _ = self.query.setup_joins( [name], opts, root_alias ) model = join_opts.model alias = joins[-1] from_parent = ( issubclass(model, opts.model) and model is not opts.model ) klass_info = { "model": model, "field": final_field, "reverse": True, "local_setter": ( partial(local_setter, final_field) if len(joins) <= 2 else local_setter_noop ), "remote_setter": partial(remote_setter, name), "from_parent": from_parent, } related_klass_infos.append(klass_info) select_fields = [] field_select_mask = select_mask.get((name, final_field)) or {} columns = self.get_default_columns( field_select_mask, start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info["select_fields"] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, field_select_mask, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( "Invalid field name(s) given in select_related: %s. " "Choices are: %s" % ( ", ".join(invalid_fields), ", ".join(_get_field_choices()) or "(none)", ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info["model"]._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { "model": parent_model, "field": parent_link, "reverse": False, "select_fields": [ select_index for select_index in klass_info["select_fields"] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info["model"]._meta.concrete_model for select_index in klass_info["select_fields"]: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield "self" else: field = klass_info["field"] if klass_info["reverse"]: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get("related_klass_infos", []) ) if not self.klass_info: return [] result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == "self": col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get("related_klass_infos", []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info["field"] if related_klass_info["reverse"]: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( "Invalid field name(s) given in select_for_update(of=(...)): %s. " "Only relational fields followed in the query are allowed. " "Choices are: %s." % ( ", ".join(invalid_names), ", ".join(_get_field_choices()), ) ) return result def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter( self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE, ): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql( MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size ) fields = [s[0] for s in self.select[0 : self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." """ return bool(self.execute_sql(SINGLE)) def execute_sql( self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE ): """ Run the query against the database and return the result(s). 
The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0 : self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. return list(result) return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = "%s.%s" % (qn(alias), qn2(columns[index])) self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND) sql, params = self.as_sql() return "EXISTS (%s)" % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. format_ = self.query.explain_info.format output_formatter = json.dumps if format_ and format_.lower() == "json" else str for row in result[0]: if not isinstance(row, str): yield " ".join(output_formatter(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): returning_fields = None returning_params = () def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, "as_sql"): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, "get_placeholder"): # Some fields (e.g. geo fields) need special munging before # they can be inserted. 
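# Illustrative only: a geometry field's get_placeholder() might return
# something like "ST_GeomFromText(%s, 4326)" so the bound parameter is
# converted server-side instead of being inserted verbatim.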
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = "%s", [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, "resolve_expression"): value = value.resolve_expression( self.query, allow_joins=False, for_save=True ) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' "can only be used to update, not to insert." % (value, field) ) if value.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, value) ) if value.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query (%s=%r)." % (field.name, value) ) return field.get_db_prep_save(value, connection=self.connection) def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement( on_conflict=self.query.on_conflict, ) result = ["%s %s" % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append("(%s)" % ", ".join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [ self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields ] for obj in self.query.objs ] else: # An empty object. value_rows = [ [self.connection.ops.pk_default_value()] for _ in self.query.objs ] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = ( not self.returning_fields and self.connection.features.has_bulk_insert ) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql( fields, self.query.on_conflict, (f.column for f in self.query.update_fields), (f.column for f in self.query.unique_fields), ) if ( self.returning_fields and self.connection.features.can_return_columns_from_insert ): if self.connection.features.can_return_rows_from_bulk_insert: result.append( self.connection.ops.bulk_insert_sql(fields, placeholder_rows) ) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns( self.returning_fields ) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if on_conflict_suffix_sql: result.append(on_conflict_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if ( self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1 ): rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [ self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, ) ] else: rows = [ ( self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ), ) ] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
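# single_alias is True when only the base table is actively referenced;
# as_sql() can then emit a plain single-table DELETE instead of routing
# the primary keys through a pk__in subquery.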
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, "get_source_expressions"): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain( self.query.annotations.values(), self.query.where.children ) ) def _as_sql(self, query): delete = "DELETE FROM %s" % self.quote_name_unless_alias(query.base_table) try: where, params = self.compile(query.where) except FullResultSet: return delete, () return f"{delete} WHERE {where}", tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [pk.get_col(self.query.get_initial_alias())] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params) outerq.add_filter("pk__in", innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return "", () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, "resolve_expression"): val = val.resolve_expression( self.query, allow_joins=False, for_save=True ) if val.contains_aggregate: raise FieldError( "Aggregate functions are not allowed in this query " "(%s=%r)." % (field.name, val) ) if val.contains_over_clause: raise FieldError( "Window expressions are not allowed in this query " "(%s=%r)." % (field.name, val) ) elif hasattr(val, "prepare_database_save"): if field.remote_field: val = val.prepare_database_save(field) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, "get_placeholder"): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = "%s" name = field.column if hasattr(val, "as_sql"): sql, params = self.compile(val) values.append("%s = %s" % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append("%s = %s" % (qn(name), placeholder)) update_params.append(val) else: values.append("%s = NULL" % qn(name)) table = self.query.base_table result = [ "UPDATE %s SET" % qn(table), ", ".join(values), ] try: where, params = self.compile(self.query.where) except FullResultSet: params = [] else: result.append("WHERE %s" % where) return " ".join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] meta = query.get_meta() fields = [meta.pk.name] related_ids_index = [] for related in self.query.related_updates: if all( path.join_field.primary_key for path in meta.get_path_to_parent(related) ): # If a primary key chain exists to the targeted related update, # then the meta.pk value can be used for it. related_ids_index.append((related, 0)) else: # This branch will only be reached when updating a field of an # ancestor that is not part of the primary key chain of a MTI # tree. related_ids_index.append((related, len(fields))) fields.append(related._meta.pk.name) query.add_fields(fields) super().pre_sql_setup() must_pre_select = ( count > 1 and not self.connection.features.update_can_self_select ) # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] related_ids = collections.defaultdict(list) for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) for parent, index in related_ids_index: related_ids[parent].extend(r[index] for r in rows) self.query.add_filter("pk__in", idents) self.query.related_ids = related_ids else: # The fast path. Filters and updates in one query. self.query.add_filter("pk__in", query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. 
""" sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ", ".join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql) params += inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
castiel248/Convert
Lib/site-packages/django/db/models/sql/compiler.py
Python
mit
89132
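A minimal sketch of how the compiler above is reached from public
queryset APIs (the Author model and app label are hypothetical; a
configured "default" database alias is assumed):

from myapp.models import Author  # hypothetical model

qs = Author.objects.filter(name__startswith="A").order_by("name")
compiler = qs.query.get_compiler(using="default")
sql, params = compiler.as_sql()
# sql is e.g. 'SELECT ... FROM "myapp_author" WHERE ... ORDER BY ...'
# and params its parameter tuple, e.g. ("A%",) on most backends.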
""" Constants specific to the SQL storage portion of the ORM. """ # Size of each "chunk" for get_iterator calls. # Larger values are slightly faster at the expense of more storage space. GET_ITERATOR_CHUNK_SIZE = 100 # Namedtuples for sql.* internal use. # How many results to expect from a cursor.execute call MULTI = "multi" SINGLE = "single" CURSOR = "cursor" NO_RESULTS = "no results" ORDER_DIR = { "ASC": ("ASC", "DESC"), "DESC": ("DESC", "ASC"), } # SQL join types. INNER = "INNER JOIN" LOUTER = "LEFT OUTER JOIN"
castiel248/Convert
Lib/site-packages/django/db/models/sql/constants.py
Python
mit
533
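A short sketch of how these result-type constants are consumed by the
compiler's execute_sql() shown earlier (the Author model is
hypothetical; each call compiles and runs the query afresh):

from django.db.models.sql.constants import CURSOR, MULTI, SINGLE
from myapp.models import Author  # hypothetical model

compiler = Author.objects.all().query.get_compiler(using="default")
rows = compiler.execute_sql(MULTI)     # iterable of fetchmany() chunks
first = compiler.execute_sql(SINGLE)   # one row, or None if no match
cursor = compiler.execute_sql(CURSOR)  # raw cursor; caller must close it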
""" Useful auxiliary data structures for query construction. Not useful outside the SQL domain. """ from django.core.exceptions import FullResultSet from django.db.models.sql.constants import INNER, LOUTER class MultiJoin(Exception): """ Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally). """ def __init__(self, names_pos, path_with_names): self.level = names_pos # The path travelled, this includes the path to the multijoin. self.names_with_path = path_with_names class Empty: pass class Join: """ Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the FROM entry. For example, the SQL generated could be LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id") This class is primarily used in Query.alias_map. All entries in alias_map must be Join compatible by providing the following attributes and methods: - table_name (string) - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - parent_alias (which table is this join's parent, can be None similarly to join_type) - as_sql() - relabeled_clone() """ def __init__( self, table_name, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation=None, ): # Join table self.table_name = table_name self.parent_alias = parent_alias # Note: table_alias is not necessarily known at instantiation time. self.table_alias = table_alias # LOUTER or INNER self.join_type = join_type # A list of 2-tuples to use in the ON clause of the JOIN. # Each 2-tuple will create one join condition in the ON clause. self.join_cols = join_field.get_joining_columns() # Along which field (or ForeignObjectRel in the reverse join case) self.join_field = join_field # Is this join nullabled? self.nullable = nullable self.filtered_relation = filtered_relation def as_sql(self, compiler, connection): """ Generate the full LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params clause for this join. """ join_conditions = [] params = [] qn = compiler.quote_name_unless_alias qn2 = connection.ops.quote_name # Add a join condition for each pair of joining columns. for lhs_col, rhs_col in self.join_cols: join_conditions.append( "%s.%s = %s.%s" % ( qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias), qn2(rhs_col), ) ) # Add a single condition inside parentheses for whatever # get_extra_restriction() returns. extra_cond = self.join_field.get_extra_restriction( self.table_alias, self.parent_alias ) if extra_cond: extra_sql, extra_params = compiler.compile(extra_cond) join_conditions.append("(%s)" % extra_sql) params.extend(extra_params) if self.filtered_relation: try: extra_sql, extra_params = compiler.compile(self.filtered_relation) except FullResultSet: pass else: join_conditions.append("(%s)" % extra_sql) params.extend(extra_params) if not join_conditions: # This might be a rel on the other end of an actual declared field. declared_field = getattr(self.join_field, "field", self.join_field) raise ValueError( "Join generated an empty ON clause. %s did not yield either " "joining columns or extra restrictions." 
% declared_field.__class__ ) on_clause_sql = " AND ".join(join_conditions) alias_str = ( "" if self.table_alias == self.table_name else (" %s" % self.table_alias) ) sql = "%s %s%s ON (%s)" % ( self.join_type, qn(self.table_name), alias_str, on_clause_sql, ) return sql, params def relabeled_clone(self, change_map): new_parent_alias = change_map.get(self.parent_alias, self.parent_alias) new_table_alias = change_map.get(self.table_alias, self.table_alias) if self.filtered_relation is not None: filtered_relation = self.filtered_relation.clone() filtered_relation.path = [ change_map.get(p, p) for p in self.filtered_relation.path ] else: filtered_relation = None return self.__class__( self.table_name, new_parent_alias, new_table_alias, self.join_type, self.join_field, self.nullable, filtered_relation=filtered_relation, ) @property def identity(self): return ( self.__class__, self.table_name, self.parent_alias, self.join_field, self.filtered_relation, ) def __eq__(self, other): if not isinstance(other, Join): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): # Ignore filtered_relation in equality check. return self.identity[:-1] == other.identity[:-1] def demote(self): new = self.relabeled_clone({}) new.join_type = INNER return new def promote(self): new = self.relabeled_clone({}) new.join_type = LOUTER return new class BaseTable: """ The BaseTable class is used for base table references in FROM clause. For example, the SQL "foo" in SELECT * FROM "foo" WHERE somecond could be generated by this class. """ join_type = None parent_alias = None filtered_relation = None def __init__(self, table_name, alias): self.table_name = table_name self.table_alias = alias def as_sql(self, compiler, connection): alias_str = ( "" if self.table_alias == self.table_name else (" %s" % self.table_alias) ) base_sql = compiler.quote_name_unless_alias(self.table_name) return base_sql + alias_str, [] def relabeled_clone(self, change_map): return self.__class__( self.table_name, change_map.get(self.table_alias, self.table_alias) ) @property def identity(self): return self.__class__, self.table_name, self.table_alias def __eq__(self, other): if not isinstance(other, BaseTable): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): return self.identity == other.identity
castiel248/Convert
Lib/site-packages/django/db/models/sql/datastructures.py
Python
mit
7297
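Both structures can be observed on any queryset's alias_map; a hedged
sketch (hypothetical Book model with a ForeignKey named "author"):

from myapp.models import Book  # hypothetical model

qs = Book.objects.filter(author__name="Ann")
for alias, entry in qs.query.alias_map.items():
    # The first entry is a BaseTable (join_type None); each later entry
    # is a Join carrying its join type, parent alias, and join columns.
    print(alias, type(entry).__name__, entry.join_type)
# e.g. "myapp_book BaseTable None" then "myapp_author Join INNER JOIN"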
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import sys from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef, Value, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from django.utils.tree import Node __all__ = ["Query", "RawQuery"] # Quotation marks ('"`[]), whitespace characters, semicolons, or inline # SQL comments are forbidden in column aliases. FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/") # Inspired from # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+") def get_field_names_from_opts(opts): if opts is None: return set() return set( chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() ) ) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( "JoinInfo", ("final_field", "targets", "opts", "joins", "path", "transform_function"), ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=()): self.params = params self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. 
result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): if self.params is None: return None return dict if isinstance(self.params, Mapping) else tuple def __str__(self): if self.params_type is None: return self.sql return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. params_type = self.params_type adapter = connection.ops.adapt_unknown_value if params_type is tuple: params = tuple(adapter(val) for val in self.params) elif params_type is dict: params = {key: adapter(val) for key, val in self.params.items()} elif params_type is None: params = None else: raise RuntimeError("Unexpected params type: %s" % params_type) self.cursor = connection.cursor() self.cursor.execute(self.sql, params) ExplainInfo = namedtuple("ExplainInfo", ("format", "options")) class Query(BaseExpression): """A single SQL query.""" alias_prefix = "T" empty_result_set_value = None subq_aliases = frozenset([alias_prefix]) compiler = "SQLCompiler" base_table_class = BaseTable join_class = Join default_cols = True default_ordering = True standard_ordering = True filter_is_sticky = False subquery = False # SQL-related attributes. # Select and related select clauses are expressions to use in the SELECT # clause of the query. The select is used for cases where we want to set up # the select clause to contain other than default fields (values(), # subqueries...). Note that annotations go to annotations dictionary. select = () # The group_by attribute can have one of the following forms: # - None: no group by at all in the query # - A tuple of expressions: group by (at least) those expressions. # String refs are also allowed for now. # - True: group by all select fields of the model # See compiler.get_group_by() for details. group_by = None order_by = () low_mark = 0 # Used for offset/limit. high_mark = None # Used for offset/limit. distinct = False distinct_fields = () select_for_update = False select_for_update_nowait = False select_for_update_skip_locked = False select_for_update_of = () select_for_no_key_update = False select_related = False has_select_fields = False # Arbitrary limit for select_related to prevents infinite recursion. max_depth = 5 # Holds the selects defined by a call to values() or values_list() # excluding annotation_select and extra_select. values_select = () # SQL annotation-related attributes. annotation_select_mask = None _annotation_select_cache = None # Set combination attributes. combinator = None combinator_all = False combined_queries = () # These are for extensions. The contents are more or less appended verbatim # to the appropriate clause. extra_select_mask = None _extra_select_cache = None extra_tables = () extra_order_by = () # A tuple that is a set of model field names and either True, if these are # the fields to defer, or False if these are the only fields to load. deferred_loading = (frozenset(), True) explain_info = None def __init__(self, model, alias_cols=True): self.model = model self.alias_refcount = {} # alias_map is the most important data structure regarding joins. # It's used for recording which joins exist in the query and what # types they are. 
The key is the alias of the joined table (possibly # the table name) and the value is a Join-like object (see # sql.datastructures.Join for more information). self.alias_map = {} # Whether to provide alias to columns during reference resolving. self.alias_cols = alias_cols # Sometimes the query contains references to aliases in outer queries (as # a result of split_exclude). Correct alias quoting needs to know these # aliases too. # Map external tables to whether they are aliased. self.external_aliases = {} self.table_map = {} # Maps table names to list of aliases. self.used_aliases = set() self.where = WhereNode() # Maps alias -> Annotation Expression. self.annotations = {} # These are for extensions. The contents are more or less appended # verbatim to the appropriate clause. self.extra = {} # Maps col_alias -> (col_sql, params). self._filtered_relations = {} @property def output_field(self): if len(self.select) == 1: select = self.select[0] return getattr(select, "target", None) or select.field elif len(self.annotation_select) == 1: return next(iter(self.annotation_select.values())).output_field @cached_property def base_table(self): for alias in self.alias_map: return alias def __str__(self): """ Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time. """ sql, params = self.sql_with_params() return sql % params def sql_with_params(self): """ Return the query as an SQL string and the parameters that will be substituted into the query. """ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() def __deepcopy__(self, memo): """Limit the amount of work when a Query is deepcopied.""" result = self.clone() memo[id(self)] = result return result def get_compiler(self, using=None, connection=None, elide_empty=True): if using is None and connection is None: raise ValueError("Need either using or connection") if using: connection = connections[using] return connection.ops.compiler(self.compiler)( self, connection, using, elide_empty ) def get_meta(self): """ Return the Options instance (the model._meta) from which to start processing. Normally, this is self.model._meta, but it can be changed by subclasses. """ if self.model: return self.model._meta def clone(self): """ Return a copy of the current Query. A lightweight alternative to deepcopy(). """ obj = Empty() obj.__class__ = self.__class__ # Copy references to everything. obj.__dict__ = self.__dict__.copy() # Clone attributes that can't use shallow copy. obj.alias_refcount = self.alias_refcount.copy() obj.alias_map = self.alias_map.copy() obj.external_aliases = self.external_aliases.copy() obj.table_map = self.table_map.copy() obj.where = self.where.clone() obj.annotations = self.annotations.copy() if self.annotation_select_mask is not None: obj.annotation_select_mask = self.annotation_select_mask.copy() if self.combined_queries: obj.combined_queries = tuple( [query.clone() for query in self.combined_queries] ) # _annotation_select_cache cannot be copied, as doing so breaks the # (necessary) state in which both annotations and # _annotation_select_cache point to the same underlying objects. # It will get re-populated in the cloned queryset the next time it's # used. 
obj._annotation_select_cache = None obj.extra = self.extra.copy() if self.extra_select_mask is not None: obj.extra_select_mask = self.extra_select_mask.copy() if self._extra_select_cache is not None: obj._extra_select_cache = self._extra_select_cache.copy() if self.select_related is not False: # Use deepcopy because select_related stores fields in nested # dicts. obj.select_related = copy.deepcopy(obj.select_related) if "subq_aliases" in self.__dict__: obj.subq_aliases = self.subq_aliases.copy() obj.used_aliases = self.used_aliases.copy() obj._filtered_relations = self._filtered_relations.copy() # Clear the cached_property, if it exists. obj.__dict__.pop("base_table", None) return obj def chain(self, klass=None): """ Return a copy of the current Query that's ready for another operation. The klass argument changes the type of the Query, e.g. UpdateQuery. """ obj = self.clone() if klass and obj.__class__ != klass: obj.__class__ = klass if not obj.filter_is_sticky: obj.used_aliases = set() obj.filter_is_sticky = False if hasattr(obj, "_setup_query"): obj._setup_query() return obj def relabeled_clone(self, change_map): clone = self.clone() clone.change_aliases(change_map) return clone def _get_col(self, target, field, alias): if not self.alias_cols: alias = None return target.get_col(alias, field) def get_aggregation(self, using, aggregate_exprs): """ Return the dictionary with the values of the existing aggregations. """ if not aggregate_exprs: return {} # Store annotation mask prior to temporarily adding aggregations for # resolving purpose to facilitate their subsequent removal. refs_subquery = False replacements = {} annotation_select_mask = self.annotation_select_mask for alias, aggregate_expr in aggregate_exprs.items(): self.check_alias(alias) aggregate = aggregate_expr.resolve_expression( self, allow_joins=True, reuse=None, summarize=True ) if not aggregate.contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) # Temporarily add aggregate to annotations to allow remaining # members of `aggregates` to resolve against each others. self.append_annotation_mask([alias]) refs_subquery |= any( getattr(self.annotations[ref], "subquery", False) for ref in aggregate.get_refs() ) aggregate = aggregate.replace_expressions(replacements) self.annotations[alias] = aggregate replacements[Ref(alias, aggregate)] = aggregate # Stash resolved aggregates now that they have been allowed to resolve # against each other. aggregates = {alias: self.annotations.pop(alias) for alias in aggregate_exprs} self.set_annotation_mask(annotation_select_mask) # Existing usage of aggregation can be determined by the presence of # selected aggregates but also by filters against aliased aggregates. _, having, qualify = self.where.split_having_qualify() has_existing_aggregation = ( any( getattr(annotation, "contains_aggregate", True) for annotation in self.annotations.values() ) or having ) # Decide if we need to use a subquery. # # Existing aggregations would cause incorrect results as # get_aggregation() must produce just one result and thus must not use # GROUP BY. # # If the query has limit or distinct, or uses set operations, then # those operations must be done in a subquery so that the query # aggregates on the limit and/or distinct results instead of applying # the distinct and limit after the aggregation. 
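# For example, Author.objects.order_by("name")[:5].count() (model
# hypothetical) must COUNT(*) over a subquery limited to 5 rows;
# counting first and slicing afterwards would give a different result.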
if ( isinstance(self.group_by, tuple) or self.is_sliced or has_existing_aggregation or refs_subquery or qualify or self.distinct or self.combinator ): from django.db.models.sql.subqueries import AggregateQuery inner_query = self.clone() inner_query.subquery = True outer_query = AggregateQuery(self.model, inner_query) inner_query.select_for_update = False inner_query.select_related = False inner_query.set_annotation_mask(self.annotation_select) # Queries with distinct_fields need ordering and when a limit is # applied we must take the slice from the ordered query. Otherwise # no need for ordering. inner_query.clear_ordering(force=False) if not inner_query.distinct: # If the inner query uses default select and it has some # aggregate annotations, then we must make sure the inner # query is grouped by the main model's primary key. However, # clearing the select clause can alter results if distinct is # used. if inner_query.default_cols and has_existing_aggregation: inner_query.group_by = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) inner_query.default_cols = False if not qualify: # Mask existing annotations that are not referenced by # aggregates to be pushed to the outer query unless # filtering against window functions is involved as it # requires complex realising. annotation_mask = set() if isinstance(self.group_by, tuple): for expr in self.group_by: annotation_mask |= expr.get_refs() for aggregate in aggregates.values(): annotation_mask |= aggregate.get_refs() inner_query.set_annotation_mask(annotation_mask) # Add aggregates to the outer AggregateQuery. This requires making # sure all columns referenced by the aggregates are selected in the # inner query. It is achieved by retrieving all column references # by the aggregates, explicitly selecting them in the inner query, # and making sure the aggregates are repointed to them. col_refs = {} for alias, aggregate in aggregates.items(): replacements = {} for col in self._gen_cols([aggregate], resolve_refs=False): if not (col_ref := col_refs.get(col)): index = len(col_refs) + 1 col_alias = f"__col{index}" col_ref = Ref(col_alias, col) col_refs[col] = col_ref inner_query.annotations[col_alias] = col inner_query.append_annotation_mask([col_alias]) replacements[col] = col_ref outer_query.annotations[alias] = aggregate.replace_expressions( replacements ) if ( inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask ): # In case of Model.objects[0:3].count(), there would be no # field selected in the inner query, yet we must use a subquery. # So, make sure at least one field is selected. inner_query.select = ( self.model._meta.pk.get_col(inner_query.get_initial_alias()), ) else: outer_query = self self.select = () self.default_cols = False self.extra = {} if self.annotations: # Inline reference to existing annotations and mask them as # they are unnecessary given only the summarized aggregations # are requested. 
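                # Illustrative note (editorial addition): assuming a
                # hypothetical Book model,
                #     Book.objects.annotate(d=F("price") - 1).aggregate(m=Max("d"))
                # reaches this branch; the "d" annotation is inlined into the
                # Max() expression below rather than selected separately.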
                replacements = {
                    Ref(alias, annotation): annotation
                    for alias, annotation in self.annotations.items()
                }
                self.annotations = {
                    alias: aggregate.replace_expressions(replacements)
                    for alias, aggregate in aggregates.items()
                }
            else:
                self.annotations = aggregates
            self.set_annotation_mask(aggregates)

        empty_set_result = [
            expression.empty_result_set_value
            for expression in outer_query.annotation_select.values()
        ]
        elide_empty = not any(result is NotImplemented for result in empty_set_result)
        outer_query.clear_ordering(force=True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = empty_set_result
        else:
            converters = compiler.get_converters(outer_query.annotation_select.values())
            result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
        """
        obj = self.clone()
        return obj.get_aggregation(using, {"__count": Count("*")})["__count"]

    def has_filters(self):
        return self.where

    def exists(self, limit=True):
        q = self.clone()
        if not (q.distinct and q.is_sliced):
            if q.group_by is True:
                q.add_fields(
                    (f.attname for f in self.model._meta.concrete_fields), False
                )
                # Disable GROUP BY aliases to avoid orphaning references to the
                # SELECT clause which is about to be cleared.
                q.set_group_by(allow_aliases=False)
            q.clear_select_clause()
        if q.combined_queries and q.combinator == "union":
            q.combined_queries = tuple(
                combined_query.exists(limit=False)
                for combined_query in q.combined_queries
            )
        q.clear_ordering(force=True)
        if limit:
            q.set_limits(high=1)
        q.add_annotation(Value(1), "a")
        return q

    def has_results(self, using):
        # exists() takes no database alias; passing one here would only be
        # misinterpreted as the `limit` flag.
        q = self.exists()
        compiler = q.get_compiler(using=using)
        return compiler.has_results()

    def explain(self, using, format=None, **options):
        q = self.clone()
        for option_name in options:
            if (
                not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
                or "--" in option_name
            ):
                raise ValueError(f"Invalid option name: {option_name!r}.")
        q.explain_info = ExplainInfo(format, options)
        compiler = q.get_compiler(using=using)
        return "\n".join(compiler.explain_query())

    def combine(self, rhs, connector):
        """
        Merge the 'rhs' query into the current one (with any 'rhs' effects
        being applied *after* (that is, "to the right of") anything in the
        current query). 'rhs' is not modified during a call to this function.

        The 'connector' parameter describes how to connect filters from the
        'rhs' query.
        """
        if self.model != rhs.model:
            raise TypeError("Cannot combine queries on two different base models.")
        if self.is_sliced:
            raise TypeError("Cannot combine queries once a slice has been taken.")
        if self.distinct != rhs.distinct:
            raise TypeError("Cannot combine a unique query with a non-unique query.")
        if self.distinct_fields != rhs.distinct_fields:
            raise TypeError("Cannot combine queries with different distinct fields.")
        # If lhs and rhs share the same alias prefix, it is possible to have
        # conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
        # as T4 -> T6 while combining two querysets. To prevent this, change an
        # alias prefix of the rhs and update current aliases accordingly,
        # except if the alias is the base table since it must be present in the
        # query on both sides.
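        # Illustrative note (editorial addition): when evaluating qs1 | qs2
        # on the same model, bump_prefix() below may rename an rhs alias such
        # as T2 to U1 so it cannot collide with an lhs alias while the joins
        # are merged.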
initial_alias = self.get_initial_alias() rhs.bump_prefix(self, exclude={initial_alias}) # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = connector == AND # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER ) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Combine subqueries aliases to ensure aliases relabelling properly # handle subqueries when combining where and select clauses. self.subq_aliases |= rhs.subq_aliases # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError( "When merging querysets using 'or', you cannot have " "extra(select=...) on both sides." ) self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. 
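        # Illustrative note (editorial addition): for
        # qs1.order_by("a") | qs2.order_by("b"), the combined query ends up
        # ordered by "b", since the rhs ordering wins below.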
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by

    def _get_defer_select_mask(self, opts, mask, select_mask=None):
        if select_mask is None:
            select_mask = {}
        select_mask[opts.pk] = {}
        # All concrete fields that are not part of the defer mask must be
        # loaded. If a relational field is encountered it gets added to the
        # mask so it can be considered if `select_related` is involved, and
        # the cycle continues by recursively calling this function.
        for field in opts.concrete_fields:
            field_mask = mask.pop(field.name, None)
            field_att_mask = mask.pop(field.attname, None)
            if field_mask is None and field_att_mask is None:
                select_mask.setdefault(field, {})
            elif field_mask:
                if not field.is_relation:
                    raise FieldError(next(iter(field_mask)))
                field_select_mask = select_mask.setdefault(field, {})
                related_model = field.remote_field.model._meta.concrete_model
                self._get_defer_select_mask(
                    related_model._meta, field_mask, field_select_mask
                )
        # Remaining defer entries must be references to reverse relationships.
        # The following code is expected to raise FieldError if it encounters
        # a malformed defer entry.
        for field_name, field_mask in mask.items():
            if filtered_relation := self._filtered_relations.get(field_name):
                relation = opts.get_field(filtered_relation.relation_name)
                field_select_mask = select_mask.setdefault((field_name, relation), {})
                field = relation.field
            else:
                reverse_rel = opts.get_field(field_name)
                # While virtual fields such as many-to-many and generic foreign
                # keys cannot be effectively deferred, they've historically
                # been allowed to be passed to QuerySet.defer(). Ignore such
                # field references until a layer of validation at mask
                # alteration time is eventually implemented.
                if not hasattr(reverse_rel, "field"):
                    continue
                field = reverse_rel.field
                field_select_mask = select_mask.setdefault(field, {})
            related_model = field.model._meta.concrete_model
            self._get_defer_select_mask(
                related_model._meta, field_mask, field_select_mask
            )
        return select_mask

    def _get_only_select_mask(self, opts, mask, select_mask=None):
        if select_mask is None:
            select_mask = {}
        select_mask[opts.pk] = {}
        # Only include fields mentioned in the mask.
        for field_name, field_mask in mask.items():
            field = opts.get_field(field_name)
            # Retrieve the actual field associated with reverse relationships
            # as that's what is expected in the select mask.
            if field in opts.related_objects:
                field_key = field.field
            else:
                field_key = field
            field_select_mask = select_mask.setdefault(field_key, {})
            if field_mask:
                if not field.is_relation:
                    raise FieldError(next(iter(field_mask)))
                related_model = field.remote_field.model._meta.concrete_model
                self._get_only_select_mask(
                    related_model._meta, field_mask, field_select_mask
                )
        return select_mask

    def get_select_mask(self):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used
        to compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.
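
        For example (illustrative addition, assuming a hypothetical Book
        model): Book.objects.only("title") yields a mask that selects only
        the primary key and the "title" field, while related models without
        restrictions don't appear in the result at all.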
""" field_names, defer = self.deferred_loading if not field_names: return {} mask = {} for field_name in field_names: part_mask = mask for part in field_name.split(LOOKUP_SEP): part_mask = part_mask.setdefault(part, {}) opts = self.get_meta() if defer: return self._get_defer_select_mask(opts, mask) return self._get_only_select_mask(opts, mask) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. alias = ( filtered_relation.alias if filtered_relation is not None else table_name ) self.table_map[table_name] = [alias] self.alias_refcount[alias] = 1 return alias, True def ref_alias(self, alias): """Increases the reference count for this alias.""" self.alias_refcount[alias] += 1 def unref_alias(self, alias, amount=1): """Decreases the reference count for this alias.""" self.alias_refcount[alias] -= amount def promote_joins(self, aliases): """ Promote recursively the join type of given aliases and its children to an outer join. If 'unconditional' is False, only promote the join if it is nullable or the parent join is an outer join. The children promotion is done to avoid join chains that contain a LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted, then we must also promote b->c automatically, or otherwise the promotion of a->b doesn't actually change anything in the query results. """ aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type is None: # This is the base table (first FROM entry) - this table # isn't really joined at all in the query, so we should not # alter its join type. continue # Only the first alias (skipped above) should have None join_type assert self.alias_map[alias].join_type is not None parent_alias = self.alias_map[alias].parent_alias parent_louter = ( parent_alias and self.alias_map[parent_alias].join_type == LOUTER ) already_louter = self.alias_map[alias].join_type == LOUTER if (self.alias_map[alias].nullable or parent_louter) and not already_louter: self.alias_map[alias] = self.alias_map[alias].promote() # Join type of 'alias' changed, so re-examine all aliases that # refer to this one. aliases.extend( join for join in self.alias_map if self.alias_map[join].parent_alias == alias and join not in aliases ) def demote_joins(self, aliases): """ Change join type from LOUTER to INNER for all joins in aliases. Similarly to promote_joins(), this method must ensure no join chains containing first an outer, then an inner join are generated. If we are demoting b->c join in chain a LOUTER b LOUTER c then we must demote a->b automatically, or otherwise the demotion of b->c doesn't actually change anything in the query results. . 
""" aliases = list(aliases) while aliases: alias = aliases.pop(0) if self.alias_map[alias].join_type == LOUTER: self.alias_map[alias] = self.alias_map[alias].demote() parent_alias = self.alias_map[alias].parent_alias if self.alias_map[parent_alias].join_type == INNER: aliases.append(parent_alias) def reset_refcounts(self, to_counts): """ Reset reference counts for aliases so that they match the value passed in `to_counts`. """ for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount) def change_aliases(self, change_map): """ Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause. """ # If keys and values of change_map were to intersect, an alias might be # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending # on their order in change_map. assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple( [col.relabeled_clone(change_map) for col in self.group_by] ) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, other_query, exclude=None): """ Change the alias prefix to the next letter in the alphabet in a way that the other query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. To prevent changing aliases use the exclude parameter. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet for s in product(seq, repeat=n): yield "".join(s) prefix = None if self.alias_prefix != other_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. 
local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( "Maximum recursion depth exceeded: too many subqueries." ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases) if exclude is None: exclude = {} self.change_aliases( { alias: "%s%d" % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) if alias not in exclude } ) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) elif self.model: alias = self.join(self.base_table_class(self.get_meta().db_table, None)) else: alias = None return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a base_table_class or join_class. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. """ if reuse_with_filtered_relation and reuse: reuse_aliases = [ a for a, j in self.alias_map.items() if a in reuse and j.equals(join) ] else: reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j == join ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias( join.table_name, create=True, filtered_relation=join.filtered_relation ) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. 
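
        For example (illustrative addition): with multi-table inheritance
        such as a hypothetical Restaurant(Place) model, referencing a Place
        field from a Restaurant query requires the parent-link join that
        this method sets up.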
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def check_alias(self, alias): if FORBIDDEN_ALIAS_PATTERN.search(alias): raise ValueError( "Column aliases cannot contain whitespace characters, quotation marks, " "semicolons, or SQL comments." ) def add_annotation(self, annotation, alias, select=True): """Add a single annotation expression to the Query.""" self.check_alias(alias) annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None) if select: self.append_annotation_mask([alias]) else: self.set_annotation_mask(set(self.annotation_select).difference({alias})) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) # Resolve combined queries. if clone.combinator: clone.combined_queries = tuple( [ combined_query.resolve_expression(query, *args, **kwargs) for combined_query in clone.combined_queries ] ) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, "external_aliases"): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias ) or ( isinstance(table, BaseTable) and table.table_name != table.table_alias ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs, include_external=True) if col.alias in self.external_aliases ] def get_group_by_cols(self, wrapper=None): # If wrapper is referenced by an alias for an explicit GROUP BY through # values() a reference to this expression and not the self must be # returned to ensure external column references are not grouped against # as well. external_cols = self.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [wrapper or self] return external_cols def as_sql(self, compiler, connection): # Some backends (e.g. Oracle) raise an error when a subquery contains # unnecessary ORDER BY clause. 
        if (
            self.subquery
            and not connection.features.ignores_unnecessary_order_by_in_subqueries
        ):
            self.clear_ordering(force=False)
            for query in self.combined_queries:
                query.clear_ordering(force=False)
        sql, params = self.get_compiler(connection=connection).as_sql()
        if self.subquery:
            sql = "(%s)" % sql
        return sql, params

    def resolve_lookup_value(self, value, can_reuse, allow_joins):
        if hasattr(value, "resolve_expression"):
            value = value.resolve_expression(
                self,
                reuse=can_reuse,
                allow_joins=allow_joins,
            )
        elif isinstance(value, (list, tuple)):
            # The items of the iterable may be expressions and therefore need
            # to be resolved independently.
            values = (
                self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
                for sub_value in value
            )
            type_ = type(value)
            if hasattr(type_, "_make"):  # namedtuple
                return type_(*values)
            return type_(values)
        return value

    def solve_lookup_type(self, lookup, summarize=False):
        """
        Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
        """
        lookup_splitted = lookup.split(LOOKUP_SEP)
        if self.annotations:
            annotation, expression_lookups = refs_expression(
                lookup_splitted, self.annotations
            )
            if annotation:
                expression = self.annotations[annotation]
                if summarize:
                    expression = Ref(annotation, expression)
                return expression_lookups, (), expression
        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
        field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
        if len(lookup_parts) > 1 and not field_parts:
            raise FieldError(
                'Invalid lookup "%s" for model %s.'
                % (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts, field):
        """
        Check whether the object passed while querying is of the correct type.
        If not, raise a ValueError specifying the wrong object.
        """
        if hasattr(value, "_meta"):
            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.'
                    % (value, opts.object_name)
                )

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (
                isinstance(value, Query)
                and not value.has_select_fields
                and not check_rel_lookup_compatibility(value.model, opts, field)
            ):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
                    % (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, "_meta"):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, "__iter__"):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def check_filterable(self, expression):
        """Raise an error if expression cannot be used in a WHERE clause."""
        if hasattr(expression, "resolve_expression") and not getattr(
            expression, "filterable", True
        ):
            raise NotSupportedError(
                expression.__class__.__name__ + " is disallowed in the filter "
                "clause."
            )
        if hasattr(expression, "get_source_expressions"):
            for expr in expression.get_source_expressions():
                self.check_filterable(expr)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
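
        For example (illustrative addition, assuming a model with a "name"
        field): for .filter(name__lower__exact="bob"), lhs is the column
        expression for "name", lookups is ["lower", "exact"] ("lower" being
        applied as a transform), and rhs is "bob".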
The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ["exact"] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = "exact" lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ("exact", "iexact"): raise ValueError("Cannot use None as a query value") return lhs.get_lookup("isnull")(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if ( lookup_name == "exact" and lookup.rhs == "" and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls ): return lhs.get_lookup("isnull")(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches( name, lhs.output_field.get_lookups() ) if suggested_lookups: suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups) else: suggestion = "." raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter( self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True, summarize=False, ): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
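
        For example (illustrative addition, assuming a hypothetical Book
        model with an "author" relation):
        .filter(author__name__icontains="bob") arrives here as the
        filter_expr ("author__name__icontains", "bob") and produces a
        WhereNode holding an icontains lookup plus the join to the author
        table.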
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, summarize=summarize, ) if hasattr(filter_expr, "resolve_expression"): if not getattr(filter_expr, "conditional", False): raise TypeError("Cannot filter against a non-conditional expression.") condition = filter_expr.resolve_expression( self, allow_joins=allow_joins, summarize=summarize ) if not isinstance(condition, Lookup): condition = self.build_lookup(["exact"], condition, True) return WhereNode([condition], connector=AND), [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg, summarize) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = { k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0) } if check_filterable: self.check_filterable(value) if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) return WhereNode([condition], connector=AND), [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource( alias, targets, join_info.targets, join_info.final_field ) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause = WhereNode([condition], connector=AND) require_outer = ( lookup_type == "isnull" and condition.rhs is True and not current_negated ) if ( current_negated and (lookup_type != "isnull" or condition.rhs is False) and condition.rhs is not None ): require_outer = True if lookup_type != "isnull": # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). 
                if (
                    self.is_nullable(targets[0])
                    or self.alias_map[join_list[-1]].join_type == LOUTER
                ):
                    lookup_class = targets[0].get_lookup("isnull")
                    col = self._get_col(targets[0], join_info.targets[0], alias)
                    clause.add(lookup_class(col, False), AND)
                # If someval is a nullable column, someval IS NOT NULL is
                # added.
                if isinstance(value, Col) and self.is_nullable(value.target):
                    lookup_class = value.target.get_lookup("isnull")
                    clause.add(lookup_class(value, False), AND)
        return clause, used_joins if not require_outer else ()

    def add_filter(self, filter_lhs, filter_rhs):
        self.add_q(Q((filter_lhs, filter_rhs)))

    def add_q(self, q_object):
        """
        A preprocessor for the internal _add_q(). Responsible for doing final
        join promotion.
        """
        # For join promotion this case is doing an AND for the added q_object
        # and existing conditions. So, any existing inner join forces the join
        # type to remain inner. Existing outer joins can however be demoted.
        # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
        # rel_a doesn't produce any rows, then the whole condition must fail.
        # So, demotion is OK.)
        existing_inner = {
            a for a in self.alias_map if self.alias_map[a].join_type == INNER
        }
        clause, _ = self._add_q(q_object, self.used_aliases)
        if clause:
            self.where.add(clause, AND)
        self.demote_joins(existing_inner)

    def build_where(self, filter_expr):
        return self.build_filter(filter_expr, allow_joins=False)[0]

    def clear_where(self):
        self.where = WhereNode()

    def _add_q(
        self,
        q_object,
        used_aliases,
        branch_negated=False,
        current_negated=False,
        allow_joins=True,
        split_subq=True,
        check_filterable=True,
        summarize=False,
    ):
        """Add a Q-object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        joinpromoter = JoinPromoter(
            q_object.connector, len(q_object.children), current_negated
        )
        for child in q_object.children:
            child_clause, needed_inner = self.build_filter(
                child,
                can_reuse=used_aliases,
                branch_negated=branch_negated,
                current_negated=current_negated,
                allow_joins=allow_joins,
                split_subq=split_subq,
                check_filterable=check_filterable,
                summarize=summarize,
            )
            joinpromoter.add_votes(needed_inner)
            if child_clause:
                target_clause.add(child_clause, connector)
        needed_inner = joinpromoter.update_join_types(self)
        return target_clause, needed_inner

    def build_filtered_relation_q(
        self, q_object, reuse, branch_negated=False, current_negated=False
    ):
        """Add a FilteredRelation object to the current filter."""
        connector = q_object.connector
        current_negated ^= q_object.negated
        branch_negated = branch_negated or q_object.negated
        target_clause = WhereNode(connector=connector, negated=q_object.negated)
        for child in q_object.children:
            if isinstance(child, Node):
                child_clause = self.build_filtered_relation_q(
                    child,
                    reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                )
            else:
                child_clause, _ = self.build_filter(
                    child,
                    can_reuse=reuse,
                    branch_negated=branch_negated,
                    current_negated=current_negated,
                    allow_joins=True,
                    split_subq=False,
                    reuse_with_filtered_relation=True,
                )
            target_clause.add(child_clause, connector)
        return target_clause

    def add_filtered_relation(self, filtered_relation, alias):
        filtered_relation.alias = alias
        lookups = dict(get_children_from_q(filtered_relation.condition))
        relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
            filtered_relation.relation_name
        )
        if relation_lookup_parts:
            raise ValueError(
                "FilteredRelation's relation_name cannot contain lookups "
                "(got %r)." % filtered_relation.relation_name
            )
        for lookup in chain(lookups):
            lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
            shift = 2 if not lookup_parts else 1
            lookup_field_path = lookup_field_parts[:-shift]
            for idx, lookup_field_part in enumerate(lookup_field_path):
                if len(relation_field_parts) > idx:
                    if relation_field_parts[idx] != lookup_field_part:
                        raise ValueError(
                            "FilteredRelation's condition doesn't support "
                            "relations outside the %r (got %r)."
                            % (filtered_relation.relation_name, lookup)
                        )
                else:
                    raise ValueError(
                        "FilteredRelation's condition doesn't support nested "
                        "relations deeper than the relation_name (got %r for "
                        "%r)." % (lookup, filtered_relation.relation_name)
                    )
        self._filtered_relations[filtered_relation.alias] = filtered_relation

    def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
        """
        Walk the list of names and turn them into PathInfo tuples. A single
        name in 'names' can generate multiple PathInfos (m2m, for example).

        'names' is the path of names to travel, 'opts' is the model Options we
        start the name resolving from, 'allow_many' is as for setup_joins().
        If fail_on_missing is set to True, then a name that can't be resolved
        will generate a FieldError.

        Return a list of PathInfo tuples. In addition return the final field
        (the last used join field) and target (which is a field guaranteed to
        contain the same value as the final field). Finally, return those
        names that weren't found (which are likely transforms and the final
        lookup).
        """
        path, names_with_path = [], []
        for pos, name in enumerate(names):
            cur_names_with_path = (name, [])
            if name == "pk":
                name = opts.pk.name

            field = None
            filtered_relation = None
            try:
                if opts is None:
                    raise FieldDoesNotExist
                field = opts.get_field(name)
            except FieldDoesNotExist:
                if name in self.annotation_select:
                    field = self.annotation_select[name].output_field
                elif name in self._filtered_relations and pos == 0:
                    filtered_relation = self._filtered_relations[name]
                    if LOOKUP_SEP in filtered_relation.relation_name:
                        parts = filtered_relation.relation_name.split(LOOKUP_SEP)
                        filtered_relation_path, field, _, _ = self.names_to_path(
                            parts,
                            opts,
                            allow_many,
                            fail_on_missing,
                        )
                        path.extend(filtered_relation_path[:-1])
                    else:
                        field = opts.get_field(filtered_relation.relation_name)
            if field is not None:
                # Fields that contain one-to-many relations with a generic
                # model (like a GenericForeignKey) cannot generate reverse
                # relations and therefore cannot be used for reverse querying.
                if field.is_relation and not field.related_model:
                    raise FieldError(
                        "Field %r does not generate an automatic reverse "
                        "relation and therefore cannot be used for reverse "
                        "querying. If it is a GenericForeignKey, consider "
                        "adding a GenericRelation." % name
                    )
                try:
                    model = field.model._meta.concrete_model
                except AttributeError:
                    # QuerySet.annotate() may introduce fields that aren't
                    # attached to a model.
                    model = None
            else:
                # We didn't find the current field, so move position back
                # one step.
                pos -= 1
                if pos == -1 or fail_on_missing:
                    available = sorted(
                        [
                            *get_field_names_from_opts(opts),
                            *self.annotation_select,
                            *self._filtered_relations,
                        ]
                    )
                    raise FieldError(
                        "Cannot resolve keyword '%s' into field. 
" "Choices are: %s" % (name, ", ".join(available)) ) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if opts is not None and model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, "path_infos"): if filtered_relation: pathinfos = field.get_path_info(filtered_relation) else: pathinfos = field.path_infos if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name) ) break return path, final_field, targets, names[pos + 1 :] def setup_joins( self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False, ): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): if not self.alias_cols: alias = None return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. 
        last_field_exception = None
        for pivot in range(len(names), 0, -1):
            try:
                path, final_field, targets, rest = self.names_to_path(
                    names[:pivot],
                    opts,
                    allow_many,
                    fail_on_missing=True,
                )
            except FieldError as exc:
                if pivot == 1:
                    # The first item cannot be a lookup, so it's safe
                    # to raise the field error here.
                    raise
                else:
                    last_field_exception = exc
            else:
                # The transforms are the remaining items that couldn't be
                # resolved into fields.
                transforms = names[pivot:]
                break
        for name in transforms:

            def transform(field, alias, *, name, previous):
                try:
                    wrapped = previous(field, alias)
                    return self.try_transform(wrapped, name)
                except FieldError:
                    # FieldError is raised if the transform doesn't exist.
                    if isinstance(final_field, Field) and last_field_exception:
                        raise last_field_exception
                    else:
                        raise

            final_transformer = functools.partial(
                transform, name=name, previous=final_transformer
            )
            final_transformer.has_transforms = True
        # Then, add the path to the query's joins. Note that we can't trim
        # joins at this stage - we will need the information about join type
        # of the trimmed joins.
        for join in path:
            if join.filtered_relation:
                filtered_relation = join.filtered_relation.clone()
                table_alias = filtered_relation.alias
            else:
                filtered_relation = None
                table_alias = None
            opts = join.to_opts
            if join.direct:
                nullable = self.is_nullable(join.join_field)
            else:
                nullable = True
            connection = self.join_class(
                opts.db_table,
                alias,
                table_alias,
                INNER,
                join.join_field,
                nullable,
                filtered_relation=filtered_relation,
            )
            reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
            alias = self.join(
                connection,
                reuse=reuse,
                reuse_with_filtered_relation=reuse_with_filtered_relation,
            )
            joins.append(alias)
            if filtered_relation:
                filtered_relation.path = joins[:]
        return JoinInfo(final_field, targets, opts, joins, path, final_transformer)

    def trim_joins(self, targets, joins, path):
        """
        The 'targets' parameter is the final fields being joined to, 'joins'
        is the full list of join aliases. The 'path' contains the PathInfos
        used to create the joins.

        Return the final target field and table alias and the new active
        joins.

        Always trim any direct join if the target column is already in the
        previous table. Can't trim reverse joins as it's unknown if there's
        anything on the other side of the join.
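
        For example (illustrative addition): filtering a hypothetical Book
        model on author__id can drop the join to the author table entirely,
        because the referenced value already exists as book.author_id on
        the previous table.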
""" joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = { r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets } targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs, include_external=False, resolve_refs=True): for expr in exprs: if isinstance(expr, Col): yield expr elif include_external and callable( getattr(expr, "get_external_cols", None) ): yield from expr.get_external_cols() elif hasattr(expr, "get_source_expressions"): if not resolve_refs and isinstance(expr, Ref): continue yield from cls._gen_cols( expr.get_source_expressions(), include_external=include_external, resolve_refs=resolve_refs, ) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( "Joined field references are not permitted in this query" ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. if name not in self.annotation_select: raise FieldError( "Cannot aggregate over the '%s' alias. Use annotate() " "to promote it." % name ) return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) annotation = self.annotations.get(field_list[0]) if annotation is not None: for transform in field_list[1:]: annotation = self.try_transform(annotation, transform) return annotation join_info = self.setup_joins( field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse ) targets, final_alias, join_list = self.trim_joins( join_info.targets, join_info.joins, join_info.path ) if not allow_joins and len(join_list) > 1: raise FieldError( "Joined field references are not permitted in this query" ) if len(targets) > 1: raise FieldError( "Referencing multicolumn fields with F() objects isn't supported" ) # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. transform = join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return transform def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT EXISTS( SELECT 1 FROM child WHERE name = 'foo' AND child.parent_id = parent.id LIMIT 1 ) """ # Generate the inner query. 
query = self.__class__(self.model) query._filtered_relations = self._filtered_relations filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_rhs = OuterRef(filter_rhs) elif isinstance(filter_rhs, F): filter_rhs = OuterRef(filter_rhs.name) query.add_filter(filter_lhs, filter_rhs) query.clear_ordering(force=True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) col = query.select[0] select_field = col.target alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup("exact") # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True lookup_class = select_field.get_lookup("exact") lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter(Exists(query)) if contains_louter: or_null_condition, _ = self.build_filter( ("%s__isnull" % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse, ) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) for query in self.combined_queries: query.set_empty() def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). 
        Some queryset types completely replace any existing list of select
        columns.
        """
        self.select = ()
        self.values_select = ()

    def add_select_col(self, col, name):
        self.select += (col,)
        self.values_select += (name,)

    def set_select(self, cols):
        self.default_cols = False
        self.select = tuple(cols)

    def add_distinct_fields(self, *field_names):
        """
        Add and resolve the given fields to the query's "distinct on" clause.
        """
        self.distinct_fields = field_names
        self.distinct = True

    def add_fields(self, field_names, allow_m2m=True):
        """
        Add the given (model) fields to the select set. Add the field names in
        the order specified.
        """
        alias = self.get_initial_alias()
        opts = self.get_meta()

        try:
            cols = []
            for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
                join_info = self.setup_joins(
                    name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
                )
                targets, final_alias, joins = self.trim_joins(
                    join_info.targets,
                    join_info.joins,
                    join_info.path,
                )
                for target in targets:
                    cols.append(join_info.transform_function(target, final_alias))
            if cols:
                self.set_select(cols)
        except MultiJoin:
            raise FieldError("Invalid field name: '%s'" % name)
        except FieldError:
            if LOOKUP_SEP in name:
                # For lookups spanning over relationships, show the error
                # from the model on which the lookup failed.
                raise
            elif name in self.annotations:
                raise FieldError(
                    "Cannot select the '%s' alias. Use annotate() to promote "
                    "it." % name
                )
            else:
                names = sorted(
                    [
                        *get_field_names_from_opts(opts),
                        *self.extra,
                        *self.annotation_select,
                        *self._filtered_relations,
                    ]
                )
                raise FieldError(
                    "Cannot resolve keyword %r into field. "
                    "Choices are: %s" % (name, ", ".join(names))
                )

    def add_ordering(self, *ordering):
        """
        Add items from the 'ordering' sequence to the query's "order by"
        clause. These items are either field names (not column names) --
        possibly with a direction prefix ('-' or '?') -- or OrderBy
        expressions.

        If 'ordering' is empty, clear all ordering from the query.
        """
        errors = []
        for item in ordering:
            if isinstance(item, str):
                if item == "?":
                    continue
                if item.startswith("-"):
                    item = item[1:]
                if item in self.annotations:
                    continue
                if self.extra and item in self.extra:
                    continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
                self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
            elif not hasattr(item, "resolve_expression"):
                errors.append(item)
            if getattr(item, "contains_aggregate", False):
                raise FieldError(
                    "Using an aggregate in order_by() without also including "
                    "it in annotate() is not allowed: %s" % item
                )
        if errors:
            raise FieldError("Invalid order_by arguments: %s" % errors)
        if ordering:
            self.order_by += ordering
        else:
            self.default_ordering = False

    def clear_ordering(self, force=False, clear_default=True):
        """
        Remove any ordering settings if the current query allows it without
        side effects, set 'force' to True to clear the ordering regardless.

        If 'clear_default' is True, there will be no ordering in the resulting
        query (not even the model's default).
        """
        if not force and (
            self.is_sliced or self.distinct_fields or self.select_for_update
        ):
            return
        self.order_by = ()
        self.extra_order_by = ()
        if clear_default:
            self.default_ordering = False

    def set_group_by(self, allow_aliases=True):
        """
        Expand the GROUP BY clause required by the query.

        This will usually be the set of all non-aggregate fields in the
        return data.
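
        For example (illustrative addition, assuming a hypothetical Book
        model): Book.objects.values("author").annotate(n=Count("id")) must
        group by the author column so that each returned row is one author
        together with its count.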
If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ if allow_aliases and self.values_select: # If grouping by aliases is allowed assign selected value aliases # by moving them to annotations. group_by_annotations = {} values_select = {} for alias, expr in zip(self.values_select, self.select): if isinstance(expr, Col): values_select[alias] = expr else: group_by_annotations[alias] = expr self.annotations = {**group_by_annotations, **self.annotations} self.append_annotation_mask(group_by_annotations) self.select = tuple(values_select.values()) self.values_select = tuple(values_select) group_by = list(self.select) for alias, annotation in self.annotation_select.items(): if not (group_by_cols := annotation.get_group_by_cols()): continue if allow_aliases and not annotation.contains_aggregate: group_by.append(Ref(alias, annotation)) else: group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): self.check_alias(name) entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != "%": entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. 
if new_existing := existing.difference(field_names): self.deferred_loading = new_existing, False else: self.clear_deferred_loading() if new_only := set(field_names).difference(existing): self.deferred_loading = new_only, True def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if "pk" in field_names: field_names.remove("pk") field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() self.has_select_fields = True if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) selected = frozenset(field_names + extra_names + annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] selected = frozenset(field_names) # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields( (f.attname for f in self.model._meta.concrete_fields), False ) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. 
group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in selected: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. """ if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. """ all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, let's get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append(join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Let's still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( None, lookup_tables[trimmed_paths + 1] ) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a join_class instead of a # base_table_class reference. But the first entry in the query's FROM # clause must not be a JOIN. for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = self.base_table_class( self.alias_map[table].table_name, table, ) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return field.null or ( field.empty_strings_allowed and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls ) def get_order_dir(field, default="ASC"): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == "-": return field[1:], dirn[1] return field, dirn[0] class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def __repr__(self): return ( f"{self.__class__.__qualname__}(connector={self.connector!r}, " f"num_children={self.num_children!r}, negated={self.negated!r})" ) def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). 
if self.effective_connector == OR and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == AND or ( self.effective_connector == OR and votes == self.num_children ): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
castiel248/Convert
Lib/site-packages/django/db/models/sql/query.py
Python
mit
114,554
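The set_limits()/clear_limits() pair defined in the query.py record above is what QuerySet slicing compiles down to: successive slices are folded into a single (low_mark, high_mark) window. A minimal standalone sketch of that arithmetic follows; the MarkTracker class name is illustrative, not Django API, but the method body mirrors the set_limits() shown above.

class MarkTracker:
    """Models how successive slices combine into one OFFSET/LIMIT window."""

    def __init__(self):
        self.low_mark, self.high_mark = 0, None

    def set_limits(self, low=None, high=None):
        # New bounds are interpreted relative to the current slice;
        # high is folded in first, then low, exactly as in Query.set_limits().
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low

t = MarkTracker()
t.set_limits(low=2, high=10)   # like qs[2:10]
t.set_limits(low=1, high=3)    # like qs[2:10][1:3]
assert (t.low_mark, t.high_mark) == (3, 5)  # rows 3 and 4 of the original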
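The JoinPromoter voting scheme at the end of the same record is easiest to see with concrete vote counts. Below is a hypothetical standalone rerun of its promotion/demotion rules (not the Django class itself); remember that update_join_types() works on the effective connector, so a negated AND behaves like OR here.

from collections import Counter

def classify(connector, num_children, votes):
    promote, demote = set(), set()
    for table, count in Counter(votes).items():
        # OR: a join not required by every child must become LEFT OUTER.
        if connector == "OR" and count < num_children:
            promote.add(table)
        # AND, or OR where every child voted for the join: INNER is safe.
        if connector == "AND" or (connector == "OR" and count == num_children):
            demote.add(table)
    return promote, demote

# rel_a__col=1 | rel_b__col=2: each child votes only for its own join.
print(classify("OR", 2, ["rel_a", "rel_b"]))
# ({'rel_a', 'rel_b'}, set()) -> both joins promoted to LEFT OUTER.

# rel_a__col__icontains="Alex" | rel_a__col__icontains="Russell":
print(classify("OR", 2, ["rel_a", "rel_a"]))
# (set(), {'rel_a'}) -> rel_a is voted for by all children, stays INNER.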
""" Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.core.exceptions import FieldError from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS from django.db.models.sql.query import Query __all__ = ["DeleteQuery", "UpdateQuery", "InsertQuery", "AggregateQuery"] class DeleteQuery(Query): """A DELETE SQL query.""" compiler = "SQLDeleteCompiler" def do_query(self, table, where, using): self.alias_map = {table: self.alias_map[table]} self.where = where cursor = self.get_compiler(using).execute_sql(CURSOR) if cursor: with cursor: return cursor.rowcount return 0 def delete_batch(self, pk_list, using): """ Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. """ # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( f"{field.attname}__in", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE], ) num_deleted += self.do_query( self.get_meta().db_table, self.where, using=using ) return num_deleted class UpdateQuery(Query): """An UPDATE SQL query.""" compiler = "SQLUpdateCompiler" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._setup_query() def _setup_query(self): """ Run on initialization and at the end of chaining. Any attributes that would normally be set in __init__() should go here instead. """ self.values = [] self.related_ids = None self.related_updates = {} def clone(self): obj = super().clone() obj.related_updates = self.related_updates.copy() return obj def update_batch(self, pk_list, values, using): self.add_update_values(values) for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( "pk__in", pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE] ) self.get_compiler(using).execute_sql(NO_RESULTS) def add_update_values(self, values): """ Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets. """ values_seq = [] for name, val in values.items(): field = self.get_meta().get_field(name) direct = ( not (field.auto_created and not field.concrete) or not field.concrete ) model = field.model._meta.concrete_model if not direct or (field.is_relation and field.many_to_many): raise FieldError( "Cannot update model field %r (only non-relations and " "foreign keys permitted)." % field ) if model is not self.get_meta().concrete_model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) def add_update_fields(self, values_seq): """ Append a sequence of (field, model, value) triples to the internal list that will be used to generate the UPDATE query. Might be more usefully called add_update_targets() to hint at the extra information here. """ for field, model, val in values_seq: if hasattr(val, "resolve_expression"): # Resolve expressions here so that annotations are no longer needed val = val.resolve_expression(self, allow_joins=False, for_save=True) self.values.append((field, model, val)) def add_related_update(self, model, field, value): """ Add (name, value) to an update query for an ancestor model. Update are coalesced so that only one update query per ancestor is run. 
""" self.related_updates.setdefault(model, []).append((field, None, value)) def get_related_updates(self): """ Return a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table. """ if not self.related_updates: return [] result = [] for model, values in self.related_updates.items(): query = UpdateQuery(model) query.values = values if self.related_ids is not None: query.add_filter("pk__in", self.related_ids[model]) result.append(query) return result class InsertQuery(Query): compiler = "SQLInsertCompiler" def __init__( self, *args, on_conflict=None, update_fields=None, unique_fields=None, **kwargs ): super().__init__(*args, **kwargs) self.fields = [] self.objs = [] self.on_conflict = on_conflict self.update_fields = update_fields or [] self.unique_fields = unique_fields or [] def insert_values(self, fields, objs, raw=False): self.fields = fields self.objs = objs self.raw = raw class AggregateQuery(Query): """ Take another query as a parameter to the FROM clause and only select the elements in the provided list. """ compiler = "SQLAggregateCompiler" def __init__(self, model, inner_query): self.inner_query = inner_query super().__init__(model)
castiel248/Convert
Lib/site-packages/django/db/models/sql/subqueries.py
Python
mit
5,935
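Both delete_batch() and update_batch() in the subqueries.py record above rely on the same chunking pattern: slice the pk list into GET_ITERATOR_CHUNK_SIZE pieces so no single DELETE or UPDATE carries an unbounded IN (...) clause. A standalone restatement of just that slicing, assuming the constant's usual value of 100:

GET_ITERATOR_CHUNK_SIZE = 100  # from django.db.models.sql.constants

def chunked(pk_list, size=GET_ITERATOR_CHUNK_SIZE):
    """Yield consecutive slices of pk_list, each at most `size` long."""
    for offset in range(0, len(pk_list), size):
        yield pk_list[offset : offset + size]

pks = list(range(250))
batches = list(chunked(pks))
assert [len(b) for b in batches] == [100, 100, 50]
# Each batch becomes one "WHERE pk IN (...)" query in delete_batch().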
""" Code to manage the creation and SQL rendering of 'where' constraints. """ import operator from functools import reduce from django.core.exceptions import EmptyResultSet, FullResultSet from django.db.models.expressions import Case, When from django.db.models.lookups import Exact from django.utils import tree from django.utils.functional import cached_property # Connection types AND = "AND" OR = "OR" XOR = "XOR" class WhereNode(tree.Node): """ An SQL WHERE clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually an expression producing boolean values. Most likely the expression is a Lookup instance. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods and contains_aggregate attribute. """ default = AND resolved = False conditional = True def split_having_qualify(self, negated=False, must_group_by=False): """ Return three possibly None nodes: one for those parts of self that should be included in the WHERE clause, one for those parts of self that must be included in the HAVING clause, and one for those parts that refer to window functions. """ if not self.contains_aggregate and not self.contains_over_clause: return self, None, None in_negated = negated ^ self.negated # Whether or not children must be connected in the same filtering # clause (WHERE > HAVING > QUALIFY) to maintain logical semantic. must_remain_connected = ( (in_negated and self.connector == AND) or (not in_negated and self.connector == OR) or self.connector == XOR ) if ( must_remain_connected and self.contains_aggregate and not self.contains_over_clause ): # It's must cheaper to short-circuit and stash everything in the # HAVING clause than split children if possible. return None, self, None where_parts = [] having_parts = [] qualify_parts = [] for c in self.children: if hasattr(c, "split_having_qualify"): where_part, having_part, qualify_part = c.split_having_qualify( in_negated, must_group_by ) if where_part is not None: where_parts.append(where_part) if having_part is not None: having_parts.append(having_part) if qualify_part is not None: qualify_parts.append(qualify_part) elif c.contains_over_clause: qualify_parts.append(c) elif c.contains_aggregate: having_parts.append(c) else: where_parts.append(c) if must_remain_connected and qualify_parts: # Disjunctive heterogeneous predicates can be pushed down to # qualify as long as no conditional aggregation is involved. if not where_parts or (where_parts and not must_group_by): return None, None, self elif where_parts: # In theory this should only be enforced when dealing with # where_parts containing predicates against multi-valued # relationships that could affect aggregation results but this # is complex to infer properly. raise NotImplementedError( "Heterogeneous disjunctive predicates against window functions are " "not implemented when performing conditional aggregation." ) where_node = ( self.create(where_parts, self.connector, self.negated) if where_parts else None ) having_node = ( self.create(having_parts, self.connector, self.negated) if having_parts else None ) qualify_node = ( self.create(qualify_parts, self.connector, self.negated) if qualify_parts else None ) return where_node, having_node, qualify_node def as_sql(self, compiler, connection): """ Return the SQL version of the where clause and the value to be substituted in. 
Return '', [] if this node matches everything, None, [] if this node is empty, and raise EmptyResultSet if this node can't match anything. """ result = [] result_params = [] if self.connector == AND: full_needed, empty_needed = len(self.children), 1 else: full_needed, empty_needed = 1, len(self.children) if self.connector == XOR and not connection.features.supports_logical_xor: # Convert if the database doesn't support XOR: # a XOR b XOR c XOR ... # to: # (a OR b OR c OR ...) AND (a + b + c + ...) == 1 lhs = self.__class__(self.children, OR) rhs_sum = reduce( operator.add, (Case(When(c, then=1), default=0) for c in self.children), ) rhs = Exact(1, rhs_sum) return self.__class__([lhs, rhs], AND, self.negated).as_sql( compiler, connection ) for child in self.children: try: sql, params = compiler.compile(child) except EmptyResultSet: empty_needed -= 1 except FullResultSet: full_needed -= 1 else: if sql: result.append(sql) result_params.extend(params) else: full_needed -= 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. # Now, check if this node is full/empty using the # counts. if empty_needed == 0: if self.negated: raise FullResultSet else: raise EmptyResultSet if full_needed == 0: if self.negated: raise EmptyResultSet else: raise FullResultSet conn = " %s " % self.connector sql_string = conn.join(result) if not sql_string: raise FullResultSet if self.negated: # Some backends (Oracle at least) need parentheses around the inner # SQL in the negated case, even if the inner SQL contains just a # single expression. sql_string = "NOT (%s)" % sql_string elif len(result) > 1 or self.resolved: sql_string = "(%s)" % sql_string return sql_string, result_params def get_group_by_cols(self): cols = [] for child in self.children: cols.extend(child.get_group_by_cols()) return cols def get_source_expressions(self): return self.children[:] def set_source_expressions(self, children): assert len(children) == len(self.children) self.children = children def relabel_aliases(self, change_map): """ Relabel the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. 
""" for pos, child in enumerate(self.children): if hasattr(child, "relabel_aliases"): # For example another WhereNode child.relabel_aliases(change_map) elif hasattr(child, "relabeled_clone"): self.children[pos] = child.relabeled_clone(change_map) def clone(self): clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, "clone"): child = child.clone() clone.children.append(child) return clone def relabeled_clone(self, change_map): clone = self.clone() clone.relabel_aliases(change_map) return clone def replace_expressions(self, replacements): if replacement := replacements.get(self): return replacement clone = self.create(connector=self.connector, negated=self.negated) for child in self.children: clone.children.append(child.replace_expressions(replacements)) return clone def get_refs(self): refs = set() for child in self.children: refs |= child.get_refs() return refs @classmethod def _contains_aggregate(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_aggregate(c) for c in obj.children) return obj.contains_aggregate @cached_property def contains_aggregate(self): return self._contains_aggregate(self) @classmethod def _contains_over_clause(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_over_clause(c) for c in obj.children) return obj.contains_over_clause @cached_property def contains_over_clause(self): return self._contains_over_clause(self) @property def is_summary(self): return any(child.is_summary for child in self.children) @staticmethod def _resolve_leaf(expr, query, *args, **kwargs): if hasattr(expr, "resolve_expression"): expr = expr.resolve_expression(query, *args, **kwargs) return expr @classmethod def _resolve_node(cls, node, query, *args, **kwargs): if hasattr(node, "children"): for child in node.children: cls._resolve_node(child, query, *args, **kwargs) if hasattr(node, "lhs"): node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs) if hasattr(node, "rhs"): node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs) def resolve_expression(self, *args, **kwargs): clone = self.clone() clone._resolve_node(clone, *args, **kwargs) clone.resolved = True return clone @cached_property def output_field(self): from django.db.models import BooleanField return BooleanField() @property def _output_field_or_none(self): return self.output_field def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END" return sql, params def get_db_converters(self, connection): return self.output_field.get_db_converters(connection) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def leaves(self): for child in self.children: if isinstance(child, WhereNode): yield from child.leaves() else: yield child class NothingNode: """A node that matches nothing.""" contains_aggregate = False contains_over_clause = False def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class ExtraWhere: # The contents are a black box - assume no aggregates or windows are used. 
contains_aggregate = False contains_over_clause = False def __init__(self, sqls, params): self.sqls = sqls self.params = params def as_sql(self, compiler=None, connection=None): sqls = ["(%s)" % sql for sql in self.sqls] return " AND ".join(sqls), list(self.params or ()) class SubqueryConstraint: # Even if aggregates or windows would be used in a subquery, # the outer query isn't interested in those. contains_aggregate = False contains_over_clause = False def __init__(self, alias, columns, targets, query_object): self.alias = alias self.columns = columns self.targets = targets query_object.clear_ordering(clear_default=True) self.query_object = query_object def as_sql(self, compiler, connection): query = self.query_object query.set_values(self.targets) query_compiler = query.get_compiler(connection=connection) return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
castiel248/Convert
Lib/site-packages/django/db/models/sql/where.py
Python
mit
12,710
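WhereNode.as_sql() in the where.py record above rewrites XOR for backends without a logical XOR operator as (a OR b OR c) AND (a + b + c) == 1, i.e. "exactly one operand is true". A quick truth-table check of that rewrite in plain Python; note that native chained XOR on backends that support it (e.g. MySQL) is parity-based, so the two can disagree once three or more operands are true:

from functools import reduce
from itertools import product
import operator

def fallback(*terms):
    # Mirrors the rewrite shown above: OR of all terms AND sum == 1.
    return any(terms) and sum(terms) == 1

def chained_xor(*terms):
    # Parity semantics of a native a XOR b XOR c chain.
    return bool(reduce(operator.xor, terms))

rows = [(t, fallback(*t), chained_xor(*t)) for t in product([0, 1], repeat=3)]
# The two agree everywhere except when all three operands are true:
assert [t for t, f, x in rows if f != x] == [(1, 1, 1)]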
import functools from collections import namedtuple def make_model_tuple(model): """ Take a model or a string of the form "app_label.ModelName" and return a corresponding ("app_label", "modelname") tuple. If a tuple is passed in, assume it's a valid model tuple already and return it unchanged. """ try: if isinstance(model, tuple): model_tuple = model elif isinstance(model, str): app_label, model_name = model.split(".") model_tuple = app_label, model_name.lower() else: model_tuple = model._meta.app_label, model._meta.model_name assert len(model_tuple) == 2 return model_tuple except (ValueError, AssertionError): raise ValueError( "Invalid model reference '%s'. String model references " "must be of the form 'app_label.ModelName'." % model ) def resolve_callables(mapping): """ Generate key/value pairs for the given mapping where the values are evaluated if they're callable. """ for k, v in mapping.items(): yield k, v() if callable(v) else v def unpickle_named_row(names, values): return create_namedtuple_class(*names)(*values) @functools.lru_cache def create_namedtuple_class(*names): # Cache type() with @lru_cache since it's too slow to be called for every # QuerySet evaluation. def __reduce__(self): return unpickle_named_row, (names, tuple(self)) return type( "Row", (namedtuple("Row", names),), {"__reduce__": __reduce__, "__slots__": ()}, ) class AltersData: """ Make subclasses preserve the alters_data attribute on overridden methods. """ def __init_subclass__(cls, **kwargs): for fn_name, fn in vars(cls).items(): if callable(fn) and not hasattr(fn, "alters_data"): for base in cls.__bases__: if base_fn := getattr(base, fn_name, None): if hasattr(base_fn, "alters_data"): fn.alters_data = base_fn.alters_data break super().__init_subclass__(**kwargs)
castiel248/Convert
Lib/site-packages/django/db/models/utils.py
Python
mit
2,182
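The resolve_callables() generator in the utils.py record above is what lets a defaults-style mapping mix plain values with zero-argument callables that are only evaluated at use time (this is how, e.g., get_or_create() treats its defaults). A standalone restatement with a small usage check:

import datetime

def resolve_callables(mapping):
    """Yield (key, value) pairs, calling any value that is callable."""
    for k, v in mapping.items():
        yield k, v() if callable(v) else v

defaults = {"name": "alice", "joined": datetime.date.today}  # callable value
assert dict(resolve_callables(defaults)) == {
    "name": "alice",
    "joined": datetime.date.today(),
}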
from contextlib import ContextDecorator, contextmanager from django.db import ( DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections, ) class TransactionManagementError(ProgrammingError): """Transaction management is used improperly.""" pass def get_connection(using=None): """ Get a database connection by name, or the default database connection if no name is provided. This is a private API. """ if using is None: using = DEFAULT_DB_ALIAS return connections[using] def get_autocommit(using=None): """Get the autocommit status of the connection.""" return get_connection(using).get_autocommit() def set_autocommit(autocommit, using=None): """Set the autocommit status of the connection.""" return get_connection(using).set_autocommit(autocommit) def commit(using=None): """Commit a transaction.""" get_connection(using).commit() def rollback(using=None): """Roll back a transaction.""" get_connection(using).rollback() def savepoint(using=None): """ Create a savepoint (if supported and required by the backend) inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. """ return get_connection(using).savepoint() def savepoint_rollback(sid, using=None): """ Roll back the most recent savepoint (if one exists). Do nothing if savepoints are not supported. """ get_connection(using).savepoint_rollback(sid) def savepoint_commit(sid, using=None): """ Commit the most recent savepoint (if one exists). Do nothing if savepoints are not supported. """ get_connection(using).savepoint_commit(sid) def clean_savepoints(using=None): """ Reset the counter used to generate unique savepoint ids in this thread. """ get_connection(using).clean_savepoints() def get_rollback(using=None): """Get the "needs rollback" flag -- for *advanced use* only.""" return get_connection(using).get_rollback() def set_rollback(rollback, using=None): """ Set or unset the "needs rollback" flag -- for *advanced use* only. When `rollback` is `True`, trigger a rollback when exiting the innermost enclosing atomic block that has `savepoint=True` (that's the default). Use this to force a rollback without raising an exception. When `rollback` is `False`, prevent such a rollback. Use this only after rolling back to a known-good state! Otherwise, you break the atomic block and data corruption may occur. """ return get_connection(using).set_rollback(rollback) @contextmanager def mark_for_rollback_on_error(using=None): """ Internal low-level utility to mark a transaction as "needs rollback" when an exception is raised while not enforcing the enclosed block to be in a transaction. This is needed by Model.save() and friends to avoid starting a transaction when in autocommit mode and a single query is executed. It's equivalent to: connection = get_connection(using) if connection.get_autocommit(): yield else: with transaction.atomic(using=using, savepoint=False): yield but it uses low-level utilities to avoid performance overhead. """ try: yield except Exception as exc: connection = get_connection(using) if connection.in_atomic_block: connection.needs_rollback = True connection.rollback_exc = exc raise def on_commit(func, using=None, robust=False): """ Register `func` to be called when the current transaction is committed. If the current transaction is rolled back, `func` will not be called. 
""" get_connection(using).on_commit(func, robust) ################################# # Decorators / context managers # ################################# class Atomic(ContextDecorator): """ Guarantee the atomic execution of a given block. An instance can be used either as a decorator or as a context manager. When it's used as a decorator, __call__ wraps the execution of the decorated function in the instance itself, used as a context manager. When it's used as a context manager, __enter__ creates a transaction or a savepoint, depending on whether a transaction is already in progress, and __exit__ commits the transaction or releases the savepoint on normal exit, and rolls back the transaction or to the savepoint on exceptions. It's possible to disable the creation of savepoints if the goal is to ensure that some code runs within a transaction without creating overhead. A stack of savepoints identifiers is maintained as an attribute of the connection. None denotes the absence of a savepoint. This allows reentrancy even if the same AtomicWrapper is reused. For example, it's possible to define `oa = atomic('other')` and use `@oa` or `with oa:` multiple times. Since database connections are thread-local, this is thread-safe. An atomic block can be tagged as durable. In this case, raise a RuntimeError if it's nested within another atomic block. This guarantees that database changes in a durable block are committed to the database when the block exists without error. This is a private API. """ def __init__(self, using, savepoint, durable): self.using = using self.savepoint = savepoint self.durable = durable self._from_testcase = False def __enter__(self): connection = get_connection(self.using) if ( self.durable and connection.atomic_blocks and not connection.atomic_blocks[-1]._from_testcase ): raise RuntimeError( "A durable atomic block cannot be nested within another " "atomic block." ) if not connection.in_atomic_block: # Reset state when entering an outermost atomic block. connection.commit_on_exit = True connection.needs_rollback = False if not connection.get_autocommit(): # Pretend we're already in an atomic block to bypass the code # that disables autocommit to enter a transaction, and make a # note to deal with this case in __exit__. connection.in_atomic_block = True connection.commit_on_exit = False if connection.in_atomic_block: # We're already in a transaction; create a savepoint, unless we # were told not to or we're already waiting for a rollback. The # second condition avoids creating useless savepoints and prevents # overwriting needs_rollback until the rollback is performed. if self.savepoint and not connection.needs_rollback: sid = connection.savepoint() connection.savepoint_ids.append(sid) else: connection.savepoint_ids.append(None) else: connection.set_autocommit( False, force_begin_transaction_with_broken_autocommit=True ) connection.in_atomic_block = True if connection.in_atomic_block: connection.atomic_blocks.append(self) def __exit__(self, exc_type, exc_value, traceback): connection = get_connection(self.using) if connection.in_atomic_block: connection.atomic_blocks.pop() if connection.savepoint_ids: sid = connection.savepoint_ids.pop() else: # Prematurely unset this flag to allow using commit or rollback. connection.in_atomic_block = False try: if connection.closed_in_transaction: # The database will perform a rollback by itself. # Wait until we exit the outermost block. 
pass elif exc_type is None and not connection.needs_rollback: if connection.in_atomic_block: # Release savepoint if there is one if sid is not None: try: connection.savepoint_commit(sid) except DatabaseError: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark for # rollback at a higher level and avoid shadowing # the original exception. connection.needs_rollback = True raise else: # Commit transaction try: connection.commit() except DatabaseError: try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() raise else: # This flag will be set to True again if there isn't a savepoint # allowing to perform the rollback at this level. connection.needs_rollback = False if connection.in_atomic_block: # Roll back to savepoint if there is one, mark for rollback # otherwise. if sid is None: connection.needs_rollback = True else: try: connection.savepoint_rollback(sid) # The savepoint won't be reused. Release it to # minimize overhead for the database server. connection.savepoint_commit(sid) except Error: # If rolling back to a savepoint fails, mark for # rollback at a higher level and avoid shadowing # the original exception. connection.needs_rollback = True else: # Roll back transaction try: connection.rollback() except Error: # An error during rollback means that something # went wrong with the connection. Drop it. connection.close() finally: # Outermost block exit when autocommit was enabled. if not connection.in_atomic_block: if connection.closed_in_transaction: connection.connection = None else: connection.set_autocommit(True) # Outermost block exit when autocommit was disabled. elif not connection.savepoint_ids and not connection.commit_on_exit: if connection.closed_in_transaction: connection.connection = None else: connection.in_atomic_block = False def atomic(using=None, savepoint=True, durable=False): # Bare decorator: @atomic -- although the first argument is called # `using`, it's actually the function being decorated. if callable(using): return Atomic(DEFAULT_DB_ALIAS, savepoint, durable)(using) # Decorator: @atomic(...) or context manager: with atomic(...): ... else: return Atomic(using, savepoint, durable) def _non_atomic_requests(view, using): try: view._non_atomic_requests.add(using) except AttributeError: view._non_atomic_requests = {using} return view def non_atomic_requests(using=None): if callable(using): return _non_atomic_requests(using, DEFAULT_DB_ALIAS) else: if using is None: using = DEFAULT_DB_ALIAS return lambda view: _non_atomic_requests(view, using)
castiel248/Convert
Lib/site-packages/django/db/transaction.py
Python
mit
12,504
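A usage sketch of the nesting rules implemented by Atomic.__enter__/__exit__ in the transaction.py record above: the outermost atomic() opens a transaction, inner blocks take savepoints, and an exception in an inner block rolls back only to its savepoint. This assumes a configured Django project; the model instances and the audit_log() helper are illustrative, not Django API.

from django.db import transaction

def transfer(source, target, amount):
    with transaction.atomic():              # outermost block: BEGIN
        source.balance -= amount
        source.save()
        try:
            with transaction.atomic():      # nested block: SAVEPOINT
                target.balance += amount
                target.save()
                audit_log(source, target)   # hypothetical helper that may raise
        except Exception:
            # Rolled back to the savepoint only; the outer transaction
            # (including source.save() above) can still commit.
            pass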
import pkgutil from importlib import import_module from django.conf import settings from django.core.exceptions import ImproperlyConfigured # For backwards compatibility with Django < 3.2 from django.utils.connection import ConnectionDoesNotExist # NOQA: F401 from django.utils.connection import BaseConnectionHandler from django.utils.functional import cached_property from django.utils.module_loading import import_string DEFAULT_DB_ALIAS = "default" DJANGO_VERSION_PICKLE_KEY = "_django_version" class Error(Exception): pass class InterfaceError(Error): pass class DatabaseError(Error): pass class DataError(DatabaseError): pass class OperationalError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InternalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class NotSupportedError(DatabaseError): pass class DatabaseErrorWrapper: """ Context manager and decorator that reraises backend-specific database exceptions using Django's common wrappers. """ def __init__(self, wrapper): """ wrapper is a database wrapper. It must have a Database attribute defining PEP-249 exceptions. """ self.wrapper = wrapper def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: return for dj_exc_type in ( DataError, OperationalError, IntegrityError, InternalError, ProgrammingError, NotSupportedError, DatabaseError, InterfaceError, Error, ): db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__) if issubclass(exc_type, db_exc_type): dj_exc_value = dj_exc_type(*exc_value.args) # Only set the 'errors_occurred' flag for errors that may make # the connection unusable. if dj_exc_type not in (DataError, IntegrityError): self.wrapper.errors_occurred = True raise dj_exc_value.with_traceback(traceback) from exc_value def __call__(self, func): # Note that we are intentionally not using @wraps here for performance # reasons. Refs #21109. def inner(*args, **kwargs): with self: return func(*args, **kwargs) return inner def load_backend(backend_name): """ Return a database backend's "base" module given a fully qualified database backend name, or raise an error if it doesn't exist. """ # This backend was renamed in Django 1.9. if backend_name == "django.db.backends.postgresql_psycopg2": backend_name = "django.db.backends.postgresql" try: return import_module("%s.base" % backend_name) except ImportError as e_user: # The database backend wasn't found. Display a helpful error message # listing all built-in database backends. import django.db.backends builtin_backends = [ name for _, name, ispkg in pkgutil.iter_modules(django.db.backends.__path__) if ispkg and name not in {"base", "dummy"} ] if backend_name not in ["django.db.backends.%s" % b for b in builtin_backends]: backend_reprs = map(repr, sorted(builtin_backends)) raise ImproperlyConfigured( "%r isn't an available database backend or couldn't be " "imported. Check the above exception. To use one of the " "built-in backends, use 'django.db.backends.XXX', where XXX " "is one of:\n" " %s" % (backend_name, ", ".join(backend_reprs)) ) from e_user else: # If there's some other error, this must be an error in Django raise class ConnectionHandler(BaseConnectionHandler): settings_name = "DATABASES" # Connections needs to still be an actual thread local, as it's truly # thread-critical. Database backends should use @async_unsafe to protect # their code from async contexts, but this will give those contexts # separate connections in case it's needed as well. 
There's no cleanup # after async contexts, though, so we don't allow that if we can help it. thread_critical = True def configure_settings(self, databases): databases = super().configure_settings(databases) if databases == {}: databases[DEFAULT_DB_ALIAS] = {"ENGINE": "django.db.backends.dummy"} elif DEFAULT_DB_ALIAS not in databases: raise ImproperlyConfigured( f"You must define a '{DEFAULT_DB_ALIAS}' database." ) elif databases[DEFAULT_DB_ALIAS] == {}: databases[DEFAULT_DB_ALIAS]["ENGINE"] = "django.db.backends.dummy" # Configure default settings. for conn in databases.values(): conn.setdefault("ATOMIC_REQUESTS", False) conn.setdefault("AUTOCOMMIT", True) conn.setdefault("ENGINE", "django.db.backends.dummy") if conn["ENGINE"] == "django.db.backends." or not conn["ENGINE"]: conn["ENGINE"] = "django.db.backends.dummy" conn.setdefault("CONN_MAX_AGE", 0) conn.setdefault("CONN_HEALTH_CHECKS", False) conn.setdefault("OPTIONS", {}) conn.setdefault("TIME_ZONE", None) for setting in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]: conn.setdefault(setting, "") test_settings = conn.setdefault("TEST", {}) default_test_settings = [ ("CHARSET", None), ("COLLATION", None), ("MIGRATE", True), ("MIRROR", None), ("NAME", None), ] for key, value in default_test_settings: test_settings.setdefault(key, value) return databases @property def databases(self): # Maintained for backward compatibility as some 3rd party packages have # made use of this private API in the past. It is no longer used within # Django itself. return self.settings def create_connection(self, alias): db = self.settings[alias] backend = load_backend(db["ENGINE"]) return backend.DatabaseWrapper(db, alias) class ConnectionRouter: def __init__(self, routers=None): """ If routers is not specified, default to settings.DATABASE_ROUTERS. """ self._routers = routers @cached_property def routers(self): if self._routers is None: self._routers = settings.DATABASE_ROUTERS routers = [] for r in self._routers: if isinstance(r, str): router = import_string(r)() else: router = r routers.append(router) return routers def _router_func(action): def _route_db(self, model, **hints): chosen_db = None for router in self.routers: try: method = getattr(router, action) except AttributeError: # If the router doesn't have a method, skip to the next one. pass else: chosen_db = method(model, **hints) if chosen_db: return chosen_db instance = hints.get("instance") if instance is not None and instance._state.db: return instance._state.db return DEFAULT_DB_ALIAS return _route_db db_for_read = _router_func("db_for_read") db_for_write = _router_func("db_for_write") def allow_relation(self, obj1, obj2, **hints): for router in self.routers: try: method = router.allow_relation except AttributeError: # If the router doesn't have a method, skip to the next one. pass else: allow = method(obj1, obj2, **hints) if allow is not None: return allow return obj1._state.db == obj2._state.db def allow_migrate(self, db, app_label, **hints): for router in self.routers: try: method = router.allow_migrate except AttributeError: # If the router doesn't have a method, skip to the next one. 
continue allow = method(db, app_label, **hints) if allow is not None: return allow return True def allow_migrate_model(self, db, model): return self.allow_migrate( db, model._meta.app_label, model_name=model._meta.model_name, model=model, ) def get_migratable_models(self, app_config, db, include_auto_created=False): """Return app models allowed to be migrated on provided db.""" models = app_config.get_models(include_auto_created=include_auto_created) return [model for model in models if self.allow_migrate_model(db, model)]
castiel248/Convert
Lib/site-packages/django/db/utils.py
Python
mit
9,279
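ConnectionRouter in the db/utils.py record above iterates over duck-typed router objects: missing methods are skipped via the AttributeError handling shown, and returning None means "no opinion, ask the next router". A minimal router of that shape; the database aliases and app label are illustrative:

class AnalyticsRouter:
    route_app_labels = {"analytics"}

    def db_for_read(self, model, **hints):
        if model._meta.app_label in self.route_app_labels:
            return "analytics_replica"
        return None  # fall through to the next router / default

    def db_for_write(self, model, **hints):
        if model._meta.app_label in self.route_app_labels:
            return "analytics_primary"
        return None

    def allow_migrate(self, db, app_label, **hints):
        if app_label in self.route_app_labels:
            return db == "analytics_primary"
        return None

# Activated via settings, e.g.:
# DATABASE_ROUTERS = ["myproject.routers.AnalyticsRouter"]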
"""Multi-consumer multi-producer dispatching mechanism Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/ See license.txt for original license. Heavily modified for Django's purposes. """ from django.dispatch.dispatcher import Signal, receiver # NOQA
castiel248/Convert
Lib/site-packages/django/dispatch/__init__.py
Python
mit
286
import logging import threading import weakref from django.utils.inspect import func_accepts_kwargs logger = logging.getLogger("django.dispatch") def _make_id(target): if hasattr(target, "__func__"): return (id(target.__self__), id(target.__func__)) return id(target) NONE_ID = _make_id(None) # A marker for caching NO_RECEIVERS = object() class Signal: """ Base class for all signals Internal attributes: receivers { receiverkey (id) : weakref(receiver) } """ def __init__(self, use_caching=False): """ Create a new signal. """ self.receivers = [] self.lock = threading.Lock() self.use_caching = use_caching # For convenience we create empty caches even if they are not used. # A note about caching: if use_caching is defined, then for each # distinct sender we cache the receivers that sender has in # 'sender_receivers_cache'. The cache is cleaned when .connect() or # .disconnect() is called and populated on send(). self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {} self._dead_receivers = False def connect(self, receiver, sender=None, weak=True, dispatch_uid=None): """ Connect receiver to sender for signal. Arguments: receiver A function or an instance method which is to receive signals. Receivers must be hashable objects. If weak is True, then receiver must be weak referenceable. Receivers must be able to accept keyword arguments. If a receiver is connected with a dispatch_uid argument, it will not be added if another receiver was already connected with that dispatch_uid. sender The sender to which the receiver should respond. Must either be a Python object, or None to receive events from any sender. weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable. """ from django.conf import settings # If DEBUG is on, check that we got a good receiver if settings.configured and settings.DEBUG: if not callable(receiver): raise TypeError("Signal receivers must be callable.") # Check for **kwargs if not func_accepts_kwargs(receiver): raise ValueError( "Signal receivers must accept keyword arguments (**kwargs)." ) if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) if weak: ref = weakref.ref receiver_object = receiver # Check for bound methods if hasattr(receiver, "__self__") and hasattr(receiver, "__func__"): ref = weakref.WeakMethod receiver_object = receiver.__self__ receiver = ref(receiver) weakref.finalize(receiver_object, self._remove_receiver) with self.lock: self._clear_dead_receivers() if not any(r_key == lookup_key for r_key, _ in self.receivers): self.receivers.append((lookup_key, receiver)) self.sender_receivers_cache.clear() def disconnect(self, receiver=None, sender=None, dispatch_uid=None): """ Disconnect receiver from sender for signal. If weak references are used, disconnect need not be called. The receiver will be removed from dispatch automatically. Arguments: receiver The registered receiver to disconnect. May be none if dispatch_uid is specified. 
sender The registered sender to disconnect dispatch_uid the unique identifier of the receiver to disconnect """ if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) disconnected = False with self.lock: self._clear_dead_receivers() for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: disconnected = True del self.receivers[index] break self.sender_receivers_cache.clear() return disconnected def has_listeners(self, sender=None): return bool(self._live_receivers(sender)) def send(self, sender, **named): """ Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. """ if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ] def send_robust(self, sender, **named): """ Send signal from sender to all connected receivers catching errors. Arguments: sender The sender of the signal. Can be any Python object (normally one registered with a connect if you actually want something to occur). named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. If any receiver raises an error (specifically any subclass of Exception), return the error instance as the result for that receiver. """ if ( not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS ): return [] # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. responses = [] for receiver in self._live_receivers(sender): try: response = receiver(signal=self, sender=sender, **named) except Exception as err: logger.error( "Error calling %s in Signal.send_robust() (%s)", receiver.__qualname__, err, exc_info=err, ) responses.append((receiver, err)) else: responses.append((receiver, response)) return responses def _clear_dead_receivers(self): # Note: caller is assumed to hold self.lock. if self._dead_receivers: self._dead_receivers = False self.receivers = [ r for r in self.receivers if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None) ] def _live_receivers(self, sender): """ Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers. """ receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) # We could end up here with NO_RECEIVERS even if we do check this case in # .send() prior to calling _live_receivers() due to concurrent .send() call. if receivers is NO_RECEIVERS: return [] if receivers is None: with self.lock: self._clear_dead_receivers() senderkey = _make_id(sender) receivers = [] for (receiverkey, r_senderkey), receiver in self.receivers: if r_senderkey == NONE_ID or r_senderkey == senderkey: receivers.append(receiver) if self.use_caching: if not receivers: self.sender_receivers_cache[sender] = NO_RECEIVERS else: # Note, we must cache the weakref versions. 
self.sender_receivers_cache[sender] = receivers non_weak_receivers = [] for receiver in receivers: if isinstance(receiver, weakref.ReferenceType): # Dereference the weak reference. receiver = receiver() if receiver is not None: non_weak_receivers.append(receiver) else: non_weak_receivers.append(receiver) return non_weak_receivers def _remove_receiver(self, receiver=None): # Mark that the self.receivers list has dead weakrefs. If so, we will # clean those up in connect, disconnect and _live_receivers while # holding self.lock. Note that doing the cleanup here isn't a good # idea, _remove_receiver() will be called as side effect of garbage # collection, and so the call can happen while we are already holding # self.lock. self._dead_receivers = True def receiver(signal, **kwargs): """ A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ... """ def _decorator(func): if isinstance(signal, (list, tuple)): for s in signal: s.connect(func, **kwargs) else: signal.connect(func, **kwargs) return func return _decorator
castiel248/Convert
Lib/site-packages/django/dispatch/dispatcher.py
Python
mit
10,793
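A minimal usage sketch for the dispatcher module above (not part of the file itself); the pizza_done signal, the Pizzeria sender, and the toppings keyword are illustrative names only.

from django.dispatch import Signal, receiver

pizza_done = Signal()


class Pizzeria:
    pass


@receiver(pizza_done, sender=Pizzeria)
def log_pizza(sender, **kwargs):
    # Receivers must accept **kwargs; "signal" and any named arguments
    # passed to send() arrive here.
    return "baked with %s" % ", ".join(kwargs.get("toppings", []))


# send() lets receiver exceptions propagate and aborts the dispatch
# loop; send_robust() catches Exception subclasses and returns them as
# that receiver's response instead.
print(pizza_done.send(sender=Pizzeria, toppings=["cheese"]))
print(pizza_done.send_robust(sender=Pizzeria, toppings=["cheese"]))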
""" Django validation and HTML form handling. """ from django.core.exceptions import ValidationError # NOQA from django.forms.boundfield import * # NOQA from django.forms.fields import * # NOQA from django.forms.forms import * # NOQA from django.forms.formsets import * # NOQA from django.forms.models import * # NOQA from django.forms.widgets import * # NOQA
castiel248/Convert
Lib/site-packages/django/forms/__init__.py
Python
mit
368
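Because the module above only aggregates its sub-modules, user code normally imports the single django.forms namespace; a hypothetical form definition against that surface (the class definition alone needs no configured settings, though binding data or rendering does):

from django import forms


class ContactForm(forms.Form):
    # Field classes, widgets, and Form all come from the star imports
    # re-exported above; the field names are illustrative.
    name = forms.CharField(max_length=100)
    message = forms.CharField(widget=forms.Textarea)
    cc_myself = forms.BooleanField(required=False)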
import re from django.core.exceptions import ValidationError from django.forms.utils import pretty_name from django.forms.widgets import MultiWidget, Textarea, TextInput from django.utils.functional import cached_property from django.utils.html import format_html, html_safe from django.utils.translation import gettext_lazy as _ __all__ = ("BoundField",) @html_safe class BoundField: "A Field plus data" def __init__(self, form, field, name): self.form = form self.field = field self.name = name self.html_name = form.add_prefix(name) self.html_initial_name = form.add_initial_prefix(name) self.html_initial_id = form.add_initial_prefix(self.auto_id) if self.field.label is None: self.label = pretty_name(name) else: self.label = self.field.label self.help_text = field.help_text or "" def __str__(self): """Render this field as an HTML widget.""" if self.field.show_hidden_initial: return self.as_widget() + self.as_hidden(only_initial=True) return self.as_widget() @cached_property def subwidgets(self): """ Most widgets yield a single subwidget, but others like RadioSelect and CheckboxSelectMultiple produce one subwidget for each choice. This property is cached so that only one database query occurs when rendering ModelChoiceFields. """ id_ = self.field.widget.attrs.get("id") or self.auto_id attrs = {"id": id_} if id_ else {} attrs = self.build_widget_attrs(attrs) return [ BoundWidget(self.field.widget, widget, self.form.renderer) for widget in self.field.widget.subwidgets( self.html_name, self.value(), attrs=attrs ) ] def __bool__(self): # BoundField evaluates to True even if it doesn't have subwidgets. return True def __iter__(self): return iter(self.subwidgets) def __len__(self): return len(self.subwidgets) def __getitem__(self, idx): # Prevent unnecessary reevaluation when accessing BoundField's attrs # from templates. if not isinstance(idx, (int, slice)): raise TypeError( "BoundField indices must be integers or slices, not %s." % type(idx).__name__ ) return self.subwidgets[idx] @property def errors(self): """ Return an ErrorList (empty if there are no errors) for this field. """ return self.form.errors.get( self.name, self.form.error_class(renderer=self.form.renderer) ) def as_widget(self, widget=None, attrs=None, only_initial=False): """ Render the field by rendering the passed widget, adding any HTML attributes passed as attrs. If a widget isn't specified, use the field's default widget. """ widget = widget or self.field.widget if self.field.localize: widget.is_localized = True attrs = attrs or {} attrs = self.build_widget_attrs(attrs, widget) if self.auto_id and "id" not in widget.attrs: attrs.setdefault( "id", self.html_initial_id if only_initial else self.auto_id ) if only_initial and self.html_initial_name in self.form.data: # Propagate the hidden initial value. value = self.form._widget_data_value( self.field.hidden_widget(), self.html_initial_name, ) else: value = self.value() return widget.render( name=self.html_initial_name if only_initial else self.html_name, value=value, attrs=attrs, renderer=self.form.renderer, ) def as_text(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="text">. """ return self.as_widget(TextInput(), attrs, **kwargs) def as_textarea(self, attrs=None, **kwargs): """Return a string of HTML for representing this as a <textarea>.""" return self.as_widget(Textarea(), attrs, **kwargs) def as_hidden(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="hidden">. 
""" return self.as_widget(self.field.hidden_widget(), attrs, **kwargs) @property def data(self): """ Return the data for this BoundField, or None if it wasn't given. """ return self.form._widget_data_value(self.field.widget, self.html_name) def value(self): """ Return the value for this BoundField, using the initial value if the form is not bound or the data otherwise. """ data = self.initial if self.form.is_bound: data = self.field.bound_data(self.data, data) return self.field.prepare_value(data) def _has_changed(self): field = self.field if field.show_hidden_initial: hidden_widget = field.hidden_widget() initial_value = self.form._widget_data_value( hidden_widget, self.html_initial_name, ) try: initial_value = field.to_python(initial_value) except ValidationError: # Always assume data has changed if validation fails. return True else: initial_value = self.initial return field.has_changed(initial_value, self.data) def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None): """ Wrap the given contents in a <label>, if the field has an ID attribute. contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the <label> tag. label_suffix overrides the form's label_suffix. """ contents = contents or self.label if label_suffix is None: label_suffix = ( self.field.label_suffix if self.field.label_suffix is not None else self.form.label_suffix ) # Only add the suffix if the label does not end in punctuation. # Translators: If found as last label character, these punctuation # characters will prevent the default label_suffix to be appended to the label if label_suffix and contents and contents[-1] not in _(":?.!"): contents = format_html("{}{}", contents, label_suffix) widget = self.field.widget id_ = widget.attrs.get("id") or self.auto_id if id_: id_for_label = widget.id_for_label(id_) if id_for_label: attrs = {**(attrs or {}), "for": id_for_label} if self.field.required and hasattr(self.form, "required_css_class"): attrs = attrs or {} if "class" in attrs: attrs["class"] += " " + self.form.required_css_class else: attrs["class"] = self.form.required_css_class context = { "field": self, "label": contents, "attrs": attrs, "use_tag": bool(id_), "tag": tag or "label", } return self.form.render(self.form.template_name_label, context) def legend_tag(self, contents=None, attrs=None, label_suffix=None): """ Wrap the given contents in a <legend>, if the field has an ID attribute. Contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the <legend> tag. label_suffix overrides the form's label_suffix. """ return self.label_tag(contents, attrs, label_suffix, tag="legend") def css_classes(self, extra_classes=None): """ Return a string of space-separated CSS classes for this field. 
""" if hasattr(extra_classes, "split"): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, "error_css_class"): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, "required_css_class"): extra_classes.add(self.form.required_css_class) return " ".join(extra_classes) @property def is_hidden(self): """Return True if this BoundField's widget is hidden.""" return self.field.widget.is_hidden @property def auto_id(self): """ Calculate and return the ID attribute for this BoundField, if the associated Form has specified auto_id. Return an empty string otherwise. """ auto_id = self.form.auto_id # Boolean or string if auto_id and "%s" in str(auto_id): return auto_id % self.html_name elif auto_id: return self.html_name return "" @property def id_for_label(self): """ Wrapper around the field widget's `id_for_label` method. Useful, for example, for focusing on this field regardless of whether it has a single widget or a MultiWidget. """ widget = self.field.widget id_ = widget.attrs.get("id") or self.auto_id return widget.id_for_label(id_) @cached_property def initial(self): return self.form.get_initial_for_field(self.field, self.name) def build_widget_attrs(self, attrs, widget=None): widget = widget or self.field.widget attrs = dict(attrs) # Copy attrs to avoid modifying the argument. if ( widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute ): # MultiValueField has require_all_fields: if False, fall back # on subfields. if ( hasattr(self.field, "require_all_fields") and not self.field.require_all_fields and isinstance(self.field.widget, MultiWidget) ): for subfield, subwidget in zip(self.field.fields, widget.widgets): subwidget.attrs["required"] = ( subwidget.use_required_attribute(self.initial) and subfield.required ) else: attrs["required"] = True if self.field.disabled: attrs["disabled"] = True return attrs @property def widget_type(self): return re.sub( r"widget$|input$", "", self.field.widget.__class__.__name__.lower() ) @property def use_fieldset(self): """ Return the value of this BoundField widget's use_fieldset attribute. """ return self.field.widget.use_fieldset @html_safe class BoundWidget: """ A container class used for iterating over widgets. This is useful for widgets that have choices. For example, the following can be used in a template: {% for radio in myform.beatles %} <label for="{{ radio.id_for_label }}"> {{ radio.choice_label }} <span class="radio">{{ radio.tag }}</span> </label> {% endfor %} """ def __init__(self, parent_widget, data, renderer): self.parent_widget = parent_widget self.data = data self.renderer = renderer def __str__(self): return self.tag(wrap_label=True) def tag(self, wrap_label=False): context = {"widget": {**self.data, "wrap_label": wrap_label}} return self.parent_widget._render(self.template_name, context, self.renderer) @property def template_name(self): if "template_name" in self.data: return self.data["template_name"] return self.parent_widget.template_name @property def id_for_label(self): return self.data["attrs"].get("id") @property def choice_label(self): return self.data["label"]
castiel248/Convert
Lib/site-packages/django/forms/boundfield.py
Python
mit
12,344
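A sketch of how a BoundField is reached in practice: indexing a form returns one, and its helpers expose the prefixed name, auto id, and current value. The ContactForm here is illustrative, and the settings.configure() call is only an assumption that lets the snippet run outside a Django project.

from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)  # standalone use only

from django import forms


class ContactForm(forms.Form):
    name = forms.CharField(max_length=100)


form = ContactForm(data={"name": "Ada"})
bound = form["name"]      # BoundField via Form.__getitem__
print(bound.html_name)    # "name" -- no form prefix was set
print(bound.auto_id)      # "id_name" from the default auto_id="id_%s"
print(bound.value())      # "Ada" -- bound data wins over initial
print(form.is_valid())    # True, so bound.errors is an empty ErrorList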
""" Field classes. """ import copy import datetime import json import math import operator import os import re import uuid from decimal import Decimal, DecimalException from io import BytesIO from urllib.parse import urlsplit, urlunsplit from django.core import validators from django.core.exceptions import ValidationError from django.forms.boundfield import BoundField from django.forms.utils import from_current_timezone, to_current_timezone from django.forms.widgets import ( FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput, DateTimeInput, EmailInput, FileInput, HiddenInput, MultipleHiddenInput, NullBooleanSelect, NumberInput, Select, SelectMultiple, SplitDateTimeWidget, SplitHiddenDateTimeWidget, Textarea, TextInput, TimeInput, URLInput, ) from django.utils import formats from django.utils.dateparse import parse_datetime, parse_duration from django.utils.duration import duration_string from django.utils.ipv6 import clean_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext_lazy __all__ = ( "Field", "CharField", "IntegerField", "DateField", "TimeField", "DateTimeField", "DurationField", "RegexField", "EmailField", "FileField", "ImageField", "URLField", "BooleanField", "NullBooleanField", "ChoiceField", "MultipleChoiceField", "ComboField", "MultiValueField", "FloatField", "DecimalField", "SplitDateTimeField", "GenericIPAddressField", "FilePathField", "JSONField", "SlugField", "TypedChoiceField", "TypedMultipleChoiceField", "UUIDField", ) class Field: widget = TextInput # Default widget to use when rendering this type of Field. hidden_widget = ( HiddenInput # Default widget to use when rendering this as "hidden". ) default_validators = [] # Default set of validators # Add an 'invalid' entry to default_error_message if you want a specific # field error message not raised by the field validators. default_error_messages = { "required": _("This field is required."), } empty_values = list(validators.EMPTY_VALUES) def __init__( self, *, required=True, widget=None, label=None, initial=None, help_text="", error_messages=None, show_hidden_initial=False, validators=(), localize=False, disabled=False, label_suffix=None, ): # required -- Boolean that specifies whether the field is required. # True by default. # widget -- A Widget class, or instance of a Widget class, that should # be used for this Field when displaying it. Each Field has a # default Widget that it'll use if you don't specify this. In # most cases, the default widget is TextInput. # label -- A verbose name for this field, for use in displaying this # field in a form. By default, Django will use a "pretty" # version of the form field name, if the Field is part of a # Form. # initial -- A value to use in this Field's initial display. This value # is *not* used as a fallback if data isn't given. # help_text -- An optional string to use as "help text" for this Field. # error_messages -- An optional dictionary to override the default # messages that the field will raise. # show_hidden_initial -- Boolean that specifies if it is needed to render a # hidden widget with initial value after widget. # validators -- List of additional validators to use # localize -- Boolean that specifies if the field should be localized. # disabled -- Boolean that specifies whether the field is disabled, that # is its widget is shown in the form but not editable. # label_suffix -- Suffix to be added to the label. 
Overrides # form's label_suffix. self.required, self.label, self.initial = required, label, initial self.show_hidden_initial = show_hidden_initial self.help_text = help_text self.disabled = disabled self.label_suffix = label_suffix widget = widget or self.widget if isinstance(widget, type): widget = widget() else: widget = copy.deepcopy(widget) # Trigger the localization machinery if needed. self.localize = localize if self.localize: widget.is_localized = True # Let the widget know whether it should display as required. widget.is_required = self.required # Hook into self.widget_attrs() for any Field-specific HTML attributes. extra_attrs = self.widget_attrs(widget) if extra_attrs: widget.attrs.update(extra_attrs) self.widget = widget messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, "default_error_messages", {})) messages.update(error_messages or {}) self.error_messages = messages self.validators = [*self.default_validators, *validators] super().__init__() def prepare_value(self, value): return value def to_python(self, value): return value def validate(self, value): if value in self.empty_values and self.required: raise ValidationError(self.error_messages["required"], code="required") def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except ValidationError as e: if hasattr(e, "code") and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise ValidationError(errors) def clean(self, value): """ Validate the given value and return its "cleaned" value as an appropriate Python object. Raise ValidationError for any errors. """ value = self.to_python(value) self.validate(value) self.run_validators(value) return value def bound_data(self, data, initial): """ Return the value that should be shown for this field on render of a bound form, given the submitted POST data for the field and the initial data, if any. For most fields, this will simply be data; FileFields need to handle it a bit differently. """ if self.disabled: return initial return data def widget_attrs(self, widget): """ Given a Widget instance (*not* a Widget class), return a dictionary of any HTML attributes that should be added to the Widget, based on this Field. """ return {} def has_changed(self, initial, data): """Return True if data differs from initial.""" # Always return False if the field is disabled since self.bound_data # always uses the initial value in this case. if self.disabled: return False try: data = self.to_python(data) if hasattr(self, "_coerce"): return self._coerce(data) != self._coerce(initial) except ValidationError: return True # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or initial value we get # is None, replace it with ''. initial_value = initial if initial is not None else "" data_value = data if data is not None else "" return initial_value != data_value def get_bound_field(self, form, field_name): """ Return a BoundField instance that will be used when accessing the form field in a template. 
""" return BoundField(form, self, field_name) def __deepcopy__(self, memo): result = copy.copy(self) memo[id(self)] = result result.widget = copy.deepcopy(self.widget, memo) result.error_messages = self.error_messages.copy() result.validators = self.validators[:] return result class CharField(Field): def __init__( self, *, max_length=None, min_length=None, strip=True, empty_value="", **kwargs ): self.max_length = max_length self.min_length = min_length self.strip = strip self.empty_value = empty_value super().__init__(**kwargs) if min_length is not None: self.validators.append(validators.MinLengthValidator(int(min_length))) if max_length is not None: self.validators.append(validators.MaxLengthValidator(int(max_length))) self.validators.append(validators.ProhibitNullCharactersValidator()) def to_python(self, value): """Return a string.""" if value not in self.empty_values: value = str(value) if self.strip: value = value.strip() if value in self.empty_values: return self.empty_value return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if self.max_length is not None and not widget.is_hidden: # The HTML attribute is maxlength, not max_length. attrs["maxlength"] = str(self.max_length) if self.min_length is not None and not widget.is_hidden: # The HTML attribute is minlength, not min_length. attrs["minlength"] = str(self.min_length) return attrs class IntegerField(Field): widget = NumberInput default_error_messages = { "invalid": _("Enter a whole number."), } re_decimal = _lazy_re_compile(r"\.0*\s*$") def __init__(self, *, max_value=None, min_value=None, step_size=None, **kwargs): self.max_value, self.min_value, self.step_size = max_value, min_value, step_size if kwargs.get("localize") and self.widget == NumberInput: # Localized number input is not well supported on most browsers kwargs.setdefault("widget", super().widget) super().__init__(**kwargs) if max_value is not None: self.validators.append(validators.MaxValueValidator(max_value)) if min_value is not None: self.validators.append(validators.MinValueValidator(min_value)) if step_size is not None: self.validators.append(validators.StepValueValidator(step_size)) def to_python(self, value): """ Validate that int() can be called on the input. Return the result of int() or None for empty values. """ value = super().to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) # Strip trailing decimal and zeros. try: value = int(self.re_decimal.sub("", str(value))) except (ValueError, TypeError): raise ValidationError(self.error_messages["invalid"], code="invalid") return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput): if self.min_value is not None: attrs["min"] = self.min_value if self.max_value is not None: attrs["max"] = self.max_value if self.step_size is not None: attrs["step"] = self.step_size return attrs class FloatField(IntegerField): default_error_messages = { "invalid": _("Enter a number."), } def to_python(self, value): """ Validate that float() can be called on the input. Return the result of float() or None for empty values. 
""" value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = float(value) except (ValueError, TypeError): raise ValidationError(self.error_messages["invalid"], code="invalid") return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not math.isfinite(value): raise ValidationError(self.error_messages["invalid"], code="invalid") def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and "step" not in widget.attrs: if self.step_size is not None: step = str(self.step_size) else: step = "any" attrs.setdefault("step", step) return attrs class DecimalField(IntegerField): default_error_messages = { "invalid": _("Enter a number."), } def __init__( self, *, max_value=None, min_value=None, max_digits=None, decimal_places=None, **kwargs, ): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(max_value=max_value, min_value=min_value, **kwargs) self.validators.append(validators.DecimalValidator(max_digits, decimal_places)) def to_python(self, value): """ Validate that the input is a decimal number. Return a Decimal instance or None for empty values. Ensure that there are no more than max_digits in the number and no more than decimal_places digits after the decimal point. """ if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = Decimal(str(value)) except DecimalException: raise ValidationError(self.error_messages["invalid"], code="invalid") return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not value.is_finite(): raise ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and "step" not in widget.attrs: if self.decimal_places is not None: # Use exponential notation for small values since they might # be parsed as 0 otherwise. ref #20765 step = str(Decimal(1).scaleb(-self.decimal_places)).lower() else: step = "any" attrs.setdefault("step", step) return attrs class BaseTemporalField(Field): def __init__(self, *, input_formats=None, **kwargs): super().__init__(**kwargs) if input_formats is not None: self.input_formats = input_formats def to_python(self, value): value = value.strip() # Try to strptime against each input format. for format in self.input_formats: try: return self.strptime(value, format) except (ValueError, TypeError): continue raise ValidationError(self.error_messages["invalid"], code="invalid") def strptime(self, value, format): raise NotImplementedError("Subclasses must define this method.") class DateField(BaseTemporalField): widget = DateInput input_formats = formats.get_format_lazy("DATE_INPUT_FORMATS") default_error_messages = { "invalid": _("Enter a valid date."), } def to_python(self, value): """ Validate that the input can be converted to a date. Return a Python datetime.date object. 
""" if value in self.empty_values: return None if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).date() class TimeField(BaseTemporalField): widget = TimeInput input_formats = formats.get_format_lazy("TIME_INPUT_FORMATS") default_error_messages = {"invalid": _("Enter a valid time.")} def to_python(self, value): """ Validate that the input can be converted to a time. Return a Python datetime.time object. """ if value in self.empty_values: return None if isinstance(value, datetime.time): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).time() class DateTimeFormatsIterator: def __iter__(self): yield from formats.get_format("DATETIME_INPUT_FORMATS") yield from formats.get_format("DATE_INPUT_FORMATS") class DateTimeField(BaseTemporalField): widget = DateTimeInput input_formats = DateTimeFormatsIterator() default_error_messages = { "invalid": _("Enter a valid date/time."), } def prepare_value(self, value): if isinstance(value, datetime.datetime): value = to_current_timezone(value) return value def to_python(self, value): """ Validate that the input can be converted to a datetime. Return a Python datetime.datetime object. """ if value in self.empty_values: return None if isinstance(value, datetime.datetime): return from_current_timezone(value) if isinstance(value, datetime.date): result = datetime.datetime(value.year, value.month, value.day) return from_current_timezone(result) try: result = parse_datetime(value.strip()) except ValueError: raise ValidationError(self.error_messages["invalid"], code="invalid") if not result: result = super().to_python(value) return from_current_timezone(result) def strptime(self, value, format): return datetime.datetime.strptime(value, format) class DurationField(Field): default_error_messages = { "invalid": _("Enter a valid duration."), "overflow": _("The number of days must be between {min_days} and {max_days}."), } def prepare_value(self, value): if isinstance(value, datetime.timedelta): return duration_string(value) return value def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.timedelta): return value try: value = parse_duration(str(value)) except OverflowError: raise ValidationError( self.error_messages["overflow"].format( min_days=datetime.timedelta.min.days, max_days=datetime.timedelta.max.days, ), code="overflow", ) if value is None: raise ValidationError(self.error_messages["invalid"], code="invalid") return value class RegexField(CharField): def __init__(self, regex, **kwargs): """ regex can be either a string or a compiled regular expression object. 
""" kwargs.setdefault("strip", False) super().__init__(**kwargs) self._set_regex(regex) def _get_regex(self): return self._regex def _set_regex(self, regex): if isinstance(regex, str): regex = re.compile(regex) self._regex = regex if ( hasattr(self, "_regex_validator") and self._regex_validator in self.validators ): self.validators.remove(self._regex_validator) self._regex_validator = validators.RegexValidator(regex=regex) self.validators.append(self._regex_validator) regex = property(_get_regex, _set_regex) class EmailField(CharField): widget = EmailInput default_validators = [validators.validate_email] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) class FileField(Field): widget = ClearableFileInput default_error_messages = { "invalid": _("No file was submitted. Check the encoding type on the form."), "missing": _("No file was submitted."), "empty": _("The submitted file is empty."), "max_length": ngettext_lazy( "Ensure this filename has at most %(max)d character (it has %(length)d).", "Ensure this filename has at most %(max)d characters (it has %(length)d).", "max", ), "contradiction": _( "Please either submit a file or check the clear checkbox, not both." ), } def __init__(self, *, max_length=None, allow_empty_file=False, **kwargs): self.max_length = max_length self.allow_empty_file = allow_empty_file super().__init__(**kwargs) def to_python(self, data): if data in self.empty_values: return None # UploadedFile objects should have name and size attributes. try: file_name = data.name file_size = data.size except AttributeError: raise ValidationError(self.error_messages["invalid"], code="invalid") if self.max_length is not None and len(file_name) > self.max_length: params = {"max": self.max_length, "length": len(file_name)} raise ValidationError( self.error_messages["max_length"], code="max_length", params=params ) if not file_name: raise ValidationError(self.error_messages["invalid"], code="invalid") if not self.allow_empty_file and not file_size: raise ValidationError(self.error_messages["empty"], code="empty") return data def clean(self, data, initial=None): # If the widget got contradictory inputs, we raise a validation error if data is FILE_INPUT_CONTRADICTION: raise ValidationError( self.error_messages["contradiction"], code="contradiction" ) # False means the field value should be cleared; further validation is # not needed. if data is False: if not self.required: return False # If the field is required, clearing is not possible (the widget # shouldn't return False data in that case anyway). False is not # in self.empty_value; if a False value makes it this far # it should be validated from here on out as None (so it will be # caught by the required check). data = None if not data and initial: return initial return super().clean(data) def bound_data(self, _, initial): return initial def has_changed(self, initial, data): return not self.disabled and data is not None class ImageField(FileField): default_validators = [validators.validate_image_file_extension] default_error_messages = { "invalid_image": _( "Upload a valid image. The file you uploaded was either not an " "image or a corrupted image." ), } def to_python(self, data): """ Check that the file-upload field data contains a valid image (GIF, JPG, PNG, etc. -- whatever Pillow supports). """ f = super().to_python(data) if f is None: return None from PIL import Image # We need to get a file object for Pillow. We might have a path or we might # have to read the data into memory. 
if hasattr(data, "temporary_file_path"): file = data.temporary_file_path() else: if hasattr(data, "read"): file = BytesIO(data.read()) else: file = BytesIO(data["content"]) try: # load() could spot a truncated JPEG, but it loads the entire # image in memory, which is a DoS vector. See #3848 and #18520. image = Image.open(file) # verify() must be called immediately after the constructor. image.verify() # Annotating so subclasses can reuse it for their own validation f.image = image # Pillow doesn't detect the MIME type of all formats. In those # cases, content_type will be None. f.content_type = Image.MIME.get(image.format) except Exception as exc: # Pillow doesn't recognize it as an image. raise ValidationError( self.error_messages["invalid_image"], code="invalid_image", ) from exc if hasattr(f, "seek") and callable(f.seek): f.seek(0) return f def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, FileInput) and "accept" not in widget.attrs: attrs.setdefault("accept", "image/*") return attrs class URLField(CharField): widget = URLInput default_error_messages = { "invalid": _("Enter a valid URL."), } default_validators = [validators.URLValidator()] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) def to_python(self, value): def split_url(url): """ Return a list of url parts via urlparse.urlsplit(), or raise ValidationError for some malformed URLs. """ try: return list(urlsplit(url)) except ValueError: # urlparse.urlsplit can raise a ValueError with some # misformatted URLs. raise ValidationError(self.error_messages["invalid"], code="invalid") value = super().to_python(value) if value: url_fields = split_url(value) if not url_fields[0]: # If no URL scheme given, assume http:// url_fields[0] = "http" if not url_fields[1]: # Assume that if no domain is provided, that the path segment # contains the domain. url_fields[1] = url_fields[2] url_fields[2] = "" # Rebuild the url_fields list, since the domain segment may now # contain the path too. url_fields = split_url(urlunsplit(url_fields)) value = urlunsplit(url_fields) return value class BooleanField(Field): widget = CheckboxInput def to_python(self, value): """Return a Python boolean object.""" # Explicitly check for the string 'False', which is what a hidden field # will submit for False. Also check for '0', since this is what # RadioSelect will provide. Because bool("True") == bool('1') == True, # we don't need to handle that explicitly. if isinstance(value, str) and value.lower() in ("false", "0"): value = False else: value = bool(value) return super().to_python(value) def validate(self, value): if not value and self.required: raise ValidationError(self.error_messages["required"], code="required") def has_changed(self, initial, data): if self.disabled: return False # Sometimes data or initial may be a string equivalent of a boolean # so we should run it through to_python first to get a boolean value return self.to_python(initial) != self.to_python(data) class NullBooleanField(BooleanField): """ A field whose valid values are None, True, and False. Clean invalid values to None. """ widget = NullBooleanSelect def to_python(self, value): """ Explicitly check for the string 'True' and 'False', which is what a hidden field will submit for True and False, for 'true' and 'false', which are likely to be returned by JavaScript serializations of forms, and for '1' and '0', which is what a RadioField will submit. 
Unlike the Booleanfield, this field must check for True because it doesn't use the bool() function. """ if value in (True, "True", "true", "1"): return True elif value in (False, "False", "false", "0"): return False else: return None def validate(self, value): pass class CallableChoiceIterator: def __init__(self, choices_func): self.choices_func = choices_func def __iter__(self): yield from self.choices_func() class ChoiceField(Field): widget = Select default_error_messages = { "invalid_choice": _( "Select a valid choice. %(value)s is not one of the available choices." ), } def __init__(self, *, choices=(), **kwargs): super().__init__(**kwargs) self.choices = choices def __deepcopy__(self, memo): result = super().__deepcopy__(memo) result._choices = copy.deepcopy(self._choices, memo) return result def _get_choices(self): return self._choices def _set_choices(self, value): # Setting choices also sets the choices on the widget. # choices can be any iterable, but we call list() on it because # it will be consumed more than once. if callable(value): value = CallableChoiceIterator(value) else: value = list(value) self._choices = self.widget.choices = value choices = property(_get_choices, _set_choices) def to_python(self, value): """Return a string.""" if value in self.empty_values: return "" return str(value) def validate(self, value): """Validate that the input is in self.choices.""" super().validate(value) if value and not self.valid_value(value): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) def valid_value(self, value): """Check to see if the provided value is a valid choice.""" text_value = str(value) for k, v in self.choices: if isinstance(v, (list, tuple)): # This is an optgroup, so look inside the group for options for k2, v2 in v: if value == k2 or text_value == str(k2): return True else: if value == k or text_value == str(k): return True return False class TypedChoiceField(ChoiceField): def __init__(self, *, coerce=lambda val: val, empty_value="", **kwargs): self.coerce = coerce self.empty_value = empty_value super().__init__(**kwargs) def _coerce(self, value): """ Validate that the value can be coerced to the right type (if not empty). """ if value == self.empty_value or value in self.empty_values: return self.empty_value try: value = self.coerce(value) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": value}, ) return value def clean(self, value): value = super().clean(value) return self._coerce(value) class MultipleChoiceField(ChoiceField): hidden_widget = MultipleHiddenInput widget = SelectMultiple default_error_messages = { "invalid_choice": _( "Select a valid choice. %(value)s is not one of the available choices." ), "invalid_list": _("Enter a list of values."), } def to_python(self, value): if not value: return [] elif not isinstance(value, (list, tuple)): raise ValidationError( self.error_messages["invalid_list"], code="invalid_list" ) return [str(val) for val in value] def validate(self, value): """Validate that the input is a list or tuple.""" if self.required and not value: raise ValidationError(self.error_messages["required"], code="required") # Validate that each value in the value list is in self.choices. 
for val in value: if not self.valid_value(val): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": val}, ) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in initial} data_set = {str(value) for value in data} return data_set != initial_set class TypedMultipleChoiceField(MultipleChoiceField): def __init__(self, *, coerce=lambda val: val, **kwargs): self.coerce = coerce self.empty_value = kwargs.pop("empty_value", []) super().__init__(**kwargs) def _coerce(self, value): """ Validate that the values are in self.choices and can be coerced to the right type. """ if value == self.empty_value or value in self.empty_values: return self.empty_value new_value = [] for choice in value: try: new_value.append(self.coerce(choice)) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages["invalid_choice"], code="invalid_choice", params={"value": choice}, ) return new_value def clean(self, value): value = super().clean(value) return self._coerce(value) def validate(self, value): if value != self.empty_value: super().validate(value) elif self.required: raise ValidationError(self.error_messages["required"], code="required") class ComboField(Field): """ A Field whose clean() method calls multiple Field clean() methods. """ def __init__(self, fields, **kwargs): super().__init__(**kwargs) # Set 'required' to False on the individual fields, because the # required validation will be handled by ComboField, not by those # individual fields. for f in fields: f.required = False self.fields = fields def clean(self, value): """ Validate the given value against all of self.fields, which is a list of Field instances. """ super().clean(value) for field in self.fields: value = field.clean(value) return value class MultiValueField(Field): """ Aggregate the logic of multiple Fields. Its clean() method takes a "decompressed" list of values, which are then cleaned into a single value according to self.fields. Each value in this list is cleaned by the corresponding field -- the first value is cleaned by the first field, the second value is cleaned by the second field, etc. Once all fields are cleaned, the list of clean values is "compressed" into a single value. Subclasses should not have to implement clean(). Instead, they must implement compress(), which takes a list of valid values and returns a "compressed" version of those values -- a single value. You'll probably want to use this with MultiWidget. """ default_error_messages = { "invalid": _("Enter a list of values."), "incomplete": _("Enter a complete value."), } def __init__(self, fields, *, require_all_fields=True, **kwargs): self.require_all_fields = require_all_fields super().__init__(**kwargs) for f in fields: f.error_messages.setdefault("incomplete", self.error_messages["incomplete"]) if self.disabled: f.disabled = True if self.require_all_fields: # Set 'required' to False on the individual fields, because the # required validation will be handled by MultiValueField, not # by those individual fields. f.required = False self.fields = fields def __deepcopy__(self, memo): result = super().__deepcopy__(memo) result.fields = tuple(x.__deepcopy__(memo) for x in self.fields) return result def validate(self, value): pass def clean(self, value): """ Validate every value in the given list. 
A value is validated against the corresponding Field in self.fields. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), clean() would call DateField.clean(value[0]) and TimeField.clean(value[1]). """ clean_data = [] errors = [] if self.disabled and not isinstance(value, list): value = self.widget.decompress(value) if not value or isinstance(value, (list, tuple)): if not value or not [v for v in value if v not in self.empty_values]: if self.required: raise ValidationError( self.error_messages["required"], code="required" ) else: return self.compress([]) else: raise ValidationError(self.error_messages["invalid"], code="invalid") for i, field in enumerate(self.fields): try: field_value = value[i] except IndexError: field_value = None if field_value in self.empty_values: if self.require_all_fields: # Raise a 'required' error if the MultiValueField is # required and any field is empty. if self.required: raise ValidationError( self.error_messages["required"], code="required" ) elif field.required: # Otherwise, add an 'incomplete' error to the list of # collected errors and skip field cleaning, if a required # field is empty. if field.error_messages["incomplete"] not in errors: errors.append(field.error_messages["incomplete"]) continue try: clean_data.append(field.clean(field_value)) except ValidationError as e: # Collect all validation errors in a single list, which we'll # raise at the end of clean(), rather than raising a single # exception for the first error we encounter. Skip duplicates. errors.extend(m for m in e.error_list if m not in errors) if errors: raise ValidationError(errors) out = self.compress(clean_data) self.validate(out) self.run_validators(out) return out def compress(self, data_list): """ Return a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list. 
""" raise NotImplementedError("Subclasses must implement this method.") def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = ["" for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.widget.decompress(initial) for field, initial, data in zip(self.fields, initial, data): try: initial = field.to_python(initial) except ValidationError: return True if field.has_changed(initial, data): return True return False class FilePathField(ChoiceField): def __init__( self, path, *, match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs, ): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders super().__init__(choices=(), **kwargs) if self.required: self.choices = [] else: self.choices = [("", "---------")] if self.match is not None: self.match_re = re.compile(self.match) if recursive: for root, dirs, files in sorted(os.walk(self.path)): if self.allow_files: for f in sorted(files): if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) if self.allow_folders: for f in sorted(dirs): if f == "__pycache__": continue if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) else: choices = [] with os.scandir(self.path) as entries: for f in entries: if f.name == "__pycache__": continue if ( (self.allow_files and f.is_file()) or (self.allow_folders and f.is_dir()) ) and (self.match is None or self.match_re.search(f.name)): choices.append((f.path, f.name)) choices.sort(key=operator.itemgetter(1)) self.choices.extend(choices) self.widget.choices = self.choices class SplitDateTimeField(MultiValueField): widget = SplitDateTimeWidget hidden_widget = SplitHiddenDateTimeWidget default_error_messages = { "invalid_date": _("Enter a valid date."), "invalid_time": _("Enter a valid time."), } def __init__(self, *, input_date_formats=None, input_time_formats=None, **kwargs): errors = self.default_error_messages.copy() if "error_messages" in kwargs: errors.update(kwargs["error_messages"]) localize = kwargs.get("localize", False) fields = ( DateField( input_formats=input_date_formats, error_messages={"invalid": errors["invalid_date"]}, localize=localize, ), TimeField( input_formats=input_time_formats, error_messages={"invalid": errors["invalid_time"]}, localize=localize, ), ) super().__init__(fields, **kwargs) def compress(self, data_list): if data_list: # Raise a validation error if time or date is empty # (possible if SplitDateTimeField has required=False). 
if data_list[0] in self.empty_values: raise ValidationError( self.error_messages["invalid_date"], code="invalid_date" ) if data_list[1] in self.empty_values: raise ValidationError( self.error_messages["invalid_time"], code="invalid_time" ) result = datetime.datetime.combine(*data_list) return from_current_timezone(result) return None class GenericIPAddressField(CharField): def __init__(self, *, protocol="both", unpack_ipv4=False, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.default_validators = validators.ip_address_validators( protocol, unpack_ipv4 )[0] super().__init__(**kwargs) def to_python(self, value): if value in self.empty_values: return "" value = value.strip() if value and ":" in value: return clean_ipv6_address(value, self.unpack_ipv4) return value class SlugField(CharField): default_validators = [validators.validate_slug] def __init__(self, *, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(**kwargs) class UUIDField(CharField): default_error_messages = { "invalid": _("Enter a valid UUID."), } def prepare_value(self, value): if isinstance(value, uuid.UUID): return str(value) return value def to_python(self, value): value = super().to_python(value) if value in self.empty_values: return None if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except ValueError: raise ValidationError(self.error_messages["invalid"], code="invalid") return value class InvalidJSONInput(str): pass class JSONString(str): pass class JSONField(CharField): default_error_messages = { "invalid": _("Enter a valid JSON."), } widget = Textarea def __init__(self, encoder=None, decoder=None, **kwargs): self.encoder = encoder self.decoder = decoder super().__init__(**kwargs) def to_python(self, value): if self.disabled: return value if value in self.empty_values: return None elif isinstance(value, (list, dict, int, float, JSONString)): return value try: converted = json.loads(value, cls=self.decoder) except json.JSONDecodeError: raise ValidationError( self.error_messages["invalid"], code="invalid", params={"value": value}, ) if isinstance(converted, str): return JSONString(converted) else: return converted def bound_data(self, data, initial): if self.disabled: return initial if data is None: return None try: return json.loads(data, cls=self.decoder) except json.JSONDecodeError: return InvalidJSONInput(data) def prepare_value(self, value): if isinstance(value, InvalidJSONInput): return value return json.dumps(value, ensure_ascii=False, cls=self.encoder) def has_changed(self, initial, data): if super().has_changed(initial, data): return True # For purposes of seeing whether something has changed, True isn't the # same as 1 and the order of keys doesn't matter. return json.dumps(initial, sort_keys=True, cls=self.encoder) != json.dumps( self.to_python(data), sort_keys=True, cls=self.encoder )
castiel248/Convert
Lib/site-packages/django/forms/fields.py
Python
mit
48,253
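A sketch of the clean() pipeline shared by the field classes above -- to_python(), then validate(), then run_validators() -- using IntegerField; the settings.configure() call is only so the lazy error message can be rendered outside a project.

from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)  # standalone use only

from django.core.exceptions import ValidationError
from django.forms import IntegerField

age = IntegerField(min_value=0, max_value=130)
print(age.clean("42"))     # 42 -- to_python() coerces the string
print(age.clean("7.0"))    # 7 -- trailing ".0" stripped by re_decimal
try:
    age.clean("-1")        # MinValueValidator(0) rejects this
except ValidationError as exc:
    print(exc.messages)    # ['Ensure this value is greater than or equal to 0.']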
""" Form classes """ import copy import datetime import warnings from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.fields import Field, FileField from django.forms.utils import ErrorDict, ErrorList, RenderableFormMixin from django.forms.widgets import Media, MediaDefiningClass from django.utils.datastructures import MultiValueDict from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import cached_property from django.utils.html import conditional_escape from django.utils.safestring import SafeString, mark_safe from django.utils.translation import gettext as _ from .renderers import get_default_renderer __all__ = ("BaseForm", "Form") class DeclarativeFieldsMetaclass(MediaDefiningClass): """Collect Fields declared on the base classes.""" def __new__(mcs, name, bases, attrs): # Collect fields from current class and remove them from attrs. attrs["declared_fields"] = { key: attrs.pop(key) for key, value in list(attrs.items()) if isinstance(value, Field) } new_class = super().__new__(mcs, name, bases, attrs) # Walk through the MRO. declared_fields = {} for base in reversed(new_class.__mro__): # Collect fields from base class. if hasattr(base, "declared_fields"): declared_fields.update(base.declared_fields) # Field shadowing. for attr, value in base.__dict__.items(): if value is None and attr in declared_fields: declared_fields.pop(attr) new_class.base_fields = declared_fields new_class.declared_fields = declared_fields return new_class class BaseForm(RenderableFormMixin): """ The main implementation of all the Form logic. Note that this class is different than Form. See the comments by the Form class for more info. Any improvements to the form API should be made to this class, not to the Form class. """ default_renderer = None field_order = None prefix = None use_required_attribute = True template_name_div = "django/forms/div.html" template_name_p = "django/forms/p.html" template_name_table = "django/forms/table.html" template_name_ul = "django/forms/ul.html" template_name_label = "django/forms/label.html" def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, field_order=None, use_required_attribute=None, renderer=None, ): self.is_bound = data is not None or files is not None self.data = MultiValueDict() if data is None else data self.files = MultiValueDict() if files is None else files self.auto_id = auto_id if prefix is not None: self.prefix = prefix self.initial = initial or {} self.error_class = error_class # Translators: This is the default suffix added to form field labels self.label_suffix = label_suffix if label_suffix is not None else _(":") self.empty_permitted = empty_permitted self._errors = None # Stores the errors after clean() has been called. # The base_fields class attribute is the *class-wide* definition of # fields. Because a particular *instance* of the class might want to # alter self.fields, we create self.fields here by copying base_fields. # Instances should always modify self.fields; they should not modify # self.base_fields. 
self.fields = copy.deepcopy(self.base_fields) self._bound_fields_cache = {} self.order_fields(self.field_order if field_order is None else field_order) if use_required_attribute is not None: self.use_required_attribute = use_required_attribute if self.empty_permitted and self.use_required_attribute: raise ValueError( "The empty_permitted and use_required_attribute arguments may " "not both be True." ) # Initialize form renderer. Use a global default if not specified # either as an argument or as self.default_renderer. if renderer is None: if self.default_renderer is None: renderer = get_default_renderer() else: renderer = self.default_renderer if isinstance(self.default_renderer, type): renderer = renderer() self.renderer = renderer def order_fields(self, field_order): """ Rearrange the fields according to field_order. field_order is a list of field names specifying the order. Append fields not included in the list in the default order for backward compatibility with subclasses not overriding field_order. If field_order is None, keep all fields in the order defined in the class. Ignore unknown fields in field_order to allow disabling fields in form subclasses without redefining ordering. """ if field_order is None: return fields = {} for key in field_order: try: fields[key] = self.fields.pop(key) except KeyError: # ignore unknown fields pass fields.update(self.fields) # add remaining fields in original order self.fields = fields def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = self.is_bound and not self._errors return "<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>" % { "cls": self.__class__.__name__, "bound": self.is_bound, "valid": is_valid, "fields": ";".join(self.fields), } def _bound_items(self): """Yield (name, bf) pairs, where bf is a BoundField object.""" for name in self.fields: yield name, self[name] def __iter__(self): """Yield the form's fields as BoundField objects.""" for name in self.fields: yield self[name] def __getitem__(self, name): """Return a BoundField with the given name.""" try: field = self.fields[name] except KeyError: raise KeyError( "Key '%s' not found in '%s'. Choices are: %s." % ( name, self.__class__.__name__, ", ".join(sorted(self.fields)), ) ) if name not in self._bound_fields_cache: self._bound_fields_cache[name] = field.get_bound_field(self, name) return self._bound_fields_cache[name] @property def errors(self): """Return an ErrorDict for the data provided for the form.""" if self._errors is None: self.full_clean() return self._errors def is_valid(self): """Return True if the form has no errors, or False otherwise.""" return self.is_bound and not self.errors def add_prefix(self, field_name): """ Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override. """ return "%s-%s" % (self.prefix, field_name) if self.prefix else field_name def add_initial_prefix(self, field_name): """Add an 'initial' prefix for checking dynamic initial values.""" return "initial-%s" % self.add_prefix(field_name) def _widget_data_value(self, widget, html_name): # value_from_datadict() gets the data from the data dictionaries. # Each widget type knows how to retrieve its own data, because some # widgets split data over several HTML fields. return widget.value_from_datadict(self.data, self.files, html_name) def _html_output( self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row ): "Output HTML. Used by as_table(), as_ul(), as_p()." 
warnings.warn( "django.forms.BaseForm._html_output() is deprecated. " "Please use .render() and .get_context() instead.", RemovedInDjango50Warning, stacklevel=2, ) # Errors that should be displayed above all fields. top_errors = self.non_field_errors().copy() output, hidden_fields = [], [] for name, bf in self._bound_items(): field = bf.field html_class_attr = "" bf_errors = self.error_class(bf.errors) if bf.is_hidden: if bf_errors: top_errors.extend( [ _("(Hidden field %(name)s) %(error)s") % {"name": name, "error": str(e)} for e in bf_errors ] ) hidden_fields.append(str(bf)) else: # Create a 'class="..."' attribute if the row should have any # CSS classes applied. css_classes = bf.css_classes() if css_classes: html_class_attr = ' class="%s"' % css_classes if errors_on_separate_row and bf_errors: output.append(error_row % str(bf_errors)) if bf.label: label = conditional_escape(bf.label) label = bf.label_tag(label) or "" else: label = "" if field.help_text: help_text = help_text_html % field.help_text else: help_text = "" output.append( normal_row % { "errors": bf_errors, "label": label, "field": bf, "help_text": help_text, "html_class_attr": html_class_attr, "css_classes": css_classes, "field_name": bf.html_name, } ) if top_errors: output.insert(0, error_row % top_errors) if hidden_fields: # Insert any hidden fields in the last row. str_hidden = "".join(hidden_fields) if output: last_row = output[-1] # Chop off the trailing row_ender (e.g. '</td></tr>') and # insert the hidden fields. if not last_row.endswith(row_ender): # This can happen in the as_p() case (and possibly others # that users write): if there are only top errors, we may # not be able to conscript the last row for our purposes, # so insert a new, empty row. last_row = normal_row % { "errors": "", "label": "", "field": "", "help_text": "", "html_class_attr": html_class_attr, "css_classes": "", "field_name": "", } output.append(last_row) output[-1] = last_row[: -len(row_ender)] + str_hidden + row_ender else: # If there aren't any rows in the output, just append the # hidden fields. output.append(str_hidden) return mark_safe("\n".join(output)) @property def template_name(self): return self.renderer.form_template_name def get_context(self): fields = [] hidden_fields = [] top_errors = self.non_field_errors().copy() for name, bf in self._bound_items(): bf_errors = self.error_class(bf.errors, renderer=self.renderer) if bf.is_hidden: if bf_errors: top_errors += [ _("(Hidden field %(name)s) %(error)s") % {"name": name, "error": str(e)} for e in bf_errors ] hidden_fields.append(bf) else: errors_str = str(bf_errors) # RemovedInDjango50Warning. if not isinstance(errors_str, SafeString): warnings.warn( f"Returning a plain string from " f"{self.error_class.__name__} is deprecated. Please " f"customize via the template system instead.", RemovedInDjango50Warning, ) errors_str = mark_safe(errors_str) fields.append((bf, errors_str)) return { "form": self, "fields": fields, "hidden_fields": hidden_fields, "errors": top_errors, } def non_field_errors(self): """ Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. """ return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class="nonfield", renderer=self.renderer), ) def add_error(self, field, error): """ Update the content of `self._errors`. The `field` argument is the name of the field to which the errors should be added. If it's None, treat the errors as NON_FIELD_ERRORS. 
The `error` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. An "error" can be either a simple string or an instance of ValidationError with its message attribute set and a "list or dictionary" can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. If `error` is a dictionary, the `field` argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary. """ if not isinstance(error, ValidationError): # Normalize to ValidationError and let its constructor # do the hard work of making sense of the input. error = ValidationError(error) if hasattr(error, "error_dict"): if field is not None: raise TypeError( "The argument `field` must be `None` when the `error` " "argument contains errors for multiple fields." ) else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field != NON_FIELD_ERRORS and field not in self.fields: raise ValueError( "'%s' has no field named '%s'." % (self.__class__.__name__, field) ) if field == NON_FIELD_ERRORS: self._errors[field] = self.error_class( error_class="nonfield", renderer=self.renderer ) else: self._errors[field] = self.error_class(renderer=self.renderer) self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field] def has_error(self, field, code=None): return field in self.errors and ( code is None or any(error.code == code for error in self.errors.as_data()[field]) ) def full_clean(self): """ Clean all of self.data and populate self._errors and self.cleaned_data. """ self._errors = ErrorDict() if not self.is_bound: # Stop further processing. return self.cleaned_data = {} # If the form is permitted to be empty, and none of the form data has # changed from the initial data, short circuit any validation. if self.empty_permitted and not self.has_changed(): return self._clean_fields() self._clean_form() self._post_clean() def _clean_fields(self): for name, bf in self._bound_items(): field = bf.field value = bf.initial if field.disabled else bf.data try: if isinstance(field, FileField): value = field.clean(value, bf.initial) else: value = field.clean(value) self.cleaned_data[name] = value if hasattr(self, "clean_%s" % name): value = getattr(self, "clean_%s" % name)() self.cleaned_data[name] = value except ValidationError as e: self.add_error(name, e) def _clean_form(self): try: cleaned_data = self.clean() except ValidationError as e: self.add_error(None, e) else: if cleaned_data is not None: self.cleaned_data = cleaned_data def _post_clean(self): """ An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms. """ pass def clean(self): """ Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'. 
""" return self.cleaned_data def has_changed(self): """Return True if data differs from initial.""" return bool(self.changed_data) @cached_property def changed_data(self): return [name for name, bf in self._bound_items() if bf._has_changed()] @property def media(self): """Return all media required to render the widgets on this form.""" media = Media() for field in self.fields.values(): media += field.widget.media return media def is_multipart(self): """ Return True if the form needs to be multipart-encoded, i.e. it has FileInput, or False otherwise. """ return any(field.widget.needs_multipart_form for field in self.fields.values()) def hidden_fields(self): """ Return a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates. """ return [field for field in self if field.is_hidden] def visible_fields(self): """ Return a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method. """ return [field for field in self if not field.is_hidden] def get_initial_for_field(self, field, field_name): """ Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values. """ value = self.initial.get(field_name, field.initial) if callable(value): value = value() # If this is an auto-generated default date, nix the microseconds # for standardized handling. See #22502. if ( isinstance(value, (datetime.datetime, datetime.time)) and not field.widget.supports_microseconds ): value = value.replace(microsecond=0) return value class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass): "A collection of Fields, plus their associated data." # This is a separate class from BaseForm in order to abstract the way # self.fields is specified. This class (Form) is the one that does the # fancy metaclass stuff purely for the semantic sugar -- it allows one # to define a form using declarative syntax. # BaseForm itself has no way of designating self.fields.
castiel248/Convert
Lib/site-packages/django/forms/forms.py
Python
mit
20,512
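# ---------------------------------------------------------------------------
# A sketch of the add_error() semantics documented in forms.py above:
# field=None routes errors to NON_FIELD_ERRORS, a dict of errors requires
# field=None, and a named field loses its entry in cleaned_data. Assumes
# the settings bootstrap and ContactForm from the previous sketch.
form = ContactForm({"name": "grace hopper", "age": "45"})
form.is_valid()                              # populate cleaned_data first

form.add_error("name", "This name is already taken.")
assert "name" not in form.cleaned_data       # value dropped on error

form.add_error(None, "Form-wide problem.")
assert form.non_field_errors()               # stored under '__all__'

other = ContactForm({"name": "ada", "age": "1"})
other.is_valid()
other.add_error(None, {"name": "Too short.", "age": "Check this."})
assert {"name", "age"} <= set(other.errors)
# ---------------------------------------------------------------------------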
from django.core.exceptions import ValidationError from django.forms import Form from django.forms.fields import BooleanField, IntegerField from django.forms.renderers import get_default_renderer from django.forms.utils import ErrorList, RenderableFormMixin from django.forms.widgets import CheckboxInput, HiddenInput, NumberInput from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from django.utils.translation import ngettext_lazy __all__ = ("BaseFormSet", "formset_factory", "all_valid") # special field names TOTAL_FORM_COUNT = "TOTAL_FORMS" INITIAL_FORM_COUNT = "INITIAL_FORMS" MIN_NUM_FORM_COUNT = "MIN_NUM_FORMS" MAX_NUM_FORM_COUNT = "MAX_NUM_FORMS" ORDERING_FIELD_NAME = "ORDER" DELETION_FIELD_NAME = "DELETE" # default minimum number of forms in a formset DEFAULT_MIN_NUM = 0 # default maximum number of forms in a formset, to prevent memory exhaustion DEFAULT_MAX_NUM = 1000 class ManagementForm(Form): """ Keep track of how many form instances are displayed on the page. If adding new forms via JavaScript, you should increment the count field of this form as well. """ template_name = "django/forms/div.html" # RemovedInDjango50Warning. TOTAL_FORMS = IntegerField(widget=HiddenInput) INITIAL_FORMS = IntegerField(widget=HiddenInput) # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of the # management form, but only for the convenience of client-side code. The # POST value of them returned from the client is not checked. MIN_NUM_FORMS = IntegerField(required=False, widget=HiddenInput) MAX_NUM_FORMS = IntegerField(required=False, widget=HiddenInput) def clean(self): cleaned_data = super().clean() # When the management form is invalid, we don't know how many forms # were submitted. cleaned_data.setdefault(TOTAL_FORM_COUNT, 0) cleaned_data.setdefault(INITIAL_FORM_COUNT, 0) return cleaned_data class BaseFormSet(RenderableFormMixin): """ A collection of instances of the same Form class. """ deletion_widget = CheckboxInput ordering_widget = NumberInput default_error_messages = { "missing_management_form": _( "ManagementForm data is missing or has been tampered with. Missing fields: " "%(field_names)s. You may need to file a bug report if the issue persists." 
), "too_many_forms": ngettext_lazy( "Please submit at most %(num)d form.", "Please submit at most %(num)d forms.", "num", ), "too_few_forms": ngettext_lazy( "Please submit at least %(num)d form.", "Please submit at least %(num)d forms.", "num", ), } template_name_div = "django/forms/formsets/div.html" template_name_p = "django/forms/formsets/p.html" template_name_table = "django/forms/formsets/table.html" template_name_ul = "django/forms/formsets/ul.html" def __init__( self, data=None, files=None, auto_id="id_%s", prefix=None, initial=None, error_class=ErrorList, form_kwargs=None, error_messages=None, ): self.is_bound = data is not None or files is not None self.prefix = prefix or self.get_default_prefix() self.auto_id = auto_id self.data = data or {} self.files = files or {} self.initial = initial self.form_kwargs = form_kwargs or {} self.error_class = error_class self._errors = None self._non_form_errors = None messages = {} for cls in reversed(type(self).__mro__): messages.update(getattr(cls, "default_error_messages", {})) if error_messages is not None: messages.update(error_messages) self.error_messages = messages def __iter__(self): """Yield the forms in the order they should be rendered.""" return iter(self.forms) def __getitem__(self, index): """Return the form at the given index, based on the rendering order.""" return self.forms[index] def __len__(self): return len(self.forms) def __bool__(self): """ Return True since all formsets have a management form which is not included in the length. """ return True def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = ( self.is_bound and not self._non_form_errors and not any(form_errors for form_errors in self._errors) ) return "<%s: bound=%s valid=%s total_forms=%s>" % ( self.__class__.__qualname__, self.is_bound, is_valid, self.total_form_count(), ) @cached_property def management_form(self): """Return the ManagementForm instance for this FormSet.""" if self.is_bound: form = ManagementForm( self.data, auto_id=self.auto_id, prefix=self.prefix, renderer=self.renderer, ) form.full_clean() else: form = ManagementForm( auto_id=self.auto_id, prefix=self.prefix, initial={ TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num, }, renderer=self.renderer, ) return form def total_form_count(self): """Return the total number of forms in this FormSet.""" if self.is_bound: # return absolute_max if it is lower than the actual total form # count in the data; this is DoS protection to prevent clients # from forcing the server to instantiate arbitrary numbers of # forms return min( self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max ) else: initial_forms = self.initial_form_count() total_forms = max(initial_forms, self.min_num) + self.extra # Allow all existing related objects/inlines to be displayed, # but don't allow extra beyond max_num. if initial_forms > self.max_num >= 0: total_forms = initial_forms elif total_forms > self.max_num >= 0: total_forms = self.max_num return total_forms def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if self.is_bound: return self.management_form.cleaned_data[INITIAL_FORM_COUNT] else: # Use the length of the initial data if it's there, 0 otherwise. 
            initial_forms = len(self.initial) if self.initial else 0
        return initial_forms

    @cached_property
    def forms(self):
        """Instantiate forms at first property access."""
        # DoS protection is included in total_form_count()
        return [
            self._construct_form(i, **self.get_form_kwargs(i))
            for i in range(self.total_form_count())
        ]

    def get_form_kwargs(self, index):
        """
        Return additional keyword arguments for each individual formset form.

        index will be None if the form being constructed is a new empty
        form.
        """
        return self.form_kwargs.copy()

    def _construct_form(self, i, **kwargs):
        """Instantiate and return the i-th form instance in a formset."""
        defaults = {
            "auto_id": self.auto_id,
            "prefix": self.add_prefix(i),
            "error_class": self.error_class,
            # Don't render the HTML 'required' attribute as it may cause
            # incorrect validation for extra, optional, and deleted
            # forms in the formset.
            "use_required_attribute": False,
            "renderer": self.renderer,
        }
        if self.is_bound:
            defaults["data"] = self.data
            defaults["files"] = self.files
        if self.initial and "initial" not in kwargs:
            try:
                defaults["initial"] = self.initial[i]
            except IndexError:
                pass
        # Allow extra forms to be empty, unless they're part of
        # the minimum forms.
        if i >= self.initial_form_count() and i >= self.min_num:
            defaults["empty_permitted"] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form

    @property
    def initial_forms(self):
        """Return a list of all the initial forms in this formset."""
        return self.forms[: self.initial_form_count()]

    @property
    def extra_forms(self):
        """Return a list of all the extra forms in this formset."""
        return self.forms[self.initial_form_count() :]

    @property
    def empty_form(self):
        form_kwargs = {
            **self.get_form_kwargs(None),
            "auto_id": self.auto_id,
            "prefix": self.add_prefix("__prefix__"),
            "empty_permitted": True,
            "use_required_attribute": False,
            "renderer": self.renderer,
        }
        form = self.form(**form_kwargs)
        self.add_fields(form, None)
        return form

    @property
    def cleaned_data(self):
        """
        Return a list of form.cleaned_data dicts for every form in self.forms.
        """
        if not self.is_valid():
            raise AttributeError(
                "'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__
            )
        return [form.cleaned_data for form in self.forms]

    @property
    def deleted_forms(self):
        """Return a list of forms that have been marked for deletion."""
        if not self.is_valid() or not self.can_delete:
            return []
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True
        if not hasattr(self, "_deleted_form_indexes"):
            self._deleted_form_indexes = []
            for i, form in enumerate(self.forms):
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]

    @property
    def ordered_forms(self):
        """
        Return a list of forms in the order specified by the incoming data.
        Raise an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError(
                "'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__
            )
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.
if not hasattr(self, "_ordering"): self._ordering = [] for i, form in enumerate(self.forms): # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue # don't add data marked for deletion to self.ordered_data if self.can_delete and self._should_delete_form(form): continue self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME])) # After we're done populating self._ordering, sort it. # A sort function to order things numerically ascending, but # None should be sorted below anything else. Allowing None as # a comparison value makes it so we can leave ordering fields # blank. def compare_ordering_key(k): if k[1] is None: return (1, 0) # +infinity, larger than any number return (0, k[1]) self._ordering.sort(key=compare_ordering_key) # Return a list of form.cleaned_data dicts in the order specified by # the form data. return [self.forms[i[0]] for i in self._ordering] @classmethod def get_default_prefix(cls): return "form" @classmethod def get_deletion_widget(cls): return cls.deletion_widget @classmethod def get_ordering_widget(cls): return cls.ordering_widget def non_form_errors(self): """ Return an ErrorList of errors that aren't associated with a particular form -- i.e., from formset.clean(). Return an empty ErrorList if there are none. """ if self._non_form_errors is None: self.full_clean() return self._non_form_errors @property def errors(self): """Return a list of form.errors for every form in self.forms.""" if self._errors is None: self.full_clean() return self._errors def total_error_count(self): """Return the number of errors across all forms in the formset.""" return len(self.non_form_errors()) + sum( len(form_errors) for form_errors in self.errors ) def _should_delete_form(self, form): """Return whether or not the form was marked for deletion.""" return form.cleaned_data.get(DELETION_FIELD_NAME, False) def is_valid(self): """Return True if every form in self.forms is valid.""" if not self.is_bound: return False # Accessing errors triggers a full clean the first time only. self.errors # List comprehension ensures is_valid() is called for all forms. # Forms due to be deleted shouldn't cause the formset to be invalid. forms_valid = all( [ form.is_valid() for form in self.forms if not (self.can_delete and self._should_delete_form(form)) ] ) return forms_valid and not self.non_form_errors() def full_clean(self): """ Clean all of self.data and populate self._errors and self._non_form_errors. """ self._errors = [] self._non_form_errors = self.error_class( error_class="nonform", renderer=self.renderer ) empty_forms_count = 0 if not self.is_bound: # Stop further processing. return if not self.management_form.is_valid(): error = ValidationError( self.error_messages["missing_management_form"], params={ "field_names": ", ".join( self.management_form.add_prefix(field_name) for field_name in self.management_form.errors ), }, code="missing_management_form", ) self._non_form_errors.append(error) for i, form in enumerate(self.forms): # Empty forms are unchanged forms beyond those with initial data. if not form.has_changed() and i >= self.initial_form_count(): empty_forms_count += 1 # Accessing errors calls full_clean() if necessary. # _should_delete_form() requires cleaned_data. 
form_errors = form.errors if self.can_delete and self._should_delete_form(form): continue self._errors.append(form_errors) try: if ( self.validate_max and self.total_form_count() - len(self.deleted_forms) > self.max_num ) or self.management_form.cleaned_data[ TOTAL_FORM_COUNT ] > self.absolute_max: raise ValidationError( self.error_messages["too_many_forms"] % {"num": self.max_num}, code="too_many_forms", ) if ( self.validate_min and self.total_form_count() - len(self.deleted_forms) - empty_forms_count < self.min_num ): raise ValidationError( self.error_messages["too_few_forms"] % {"num": self.min_num}, code="too_few_forms", ) # Give self.clean() a chance to do cross-form validation. self.clean() except ValidationError as e: self._non_form_errors = self.error_class( e.error_list, error_class="nonform", renderer=self.renderer, ) def clean(self): """ Hook for doing any extra formset-wide cleaning after Form.clean() has been called on every form. Any ValidationError raised by this method will not be associated with a particular form; it will be accessible via formset.non_form_errors() """ pass def has_changed(self): """Return True if data in any form differs from initial.""" return any(form.has_changed() for form in self) def add_fields(self, form, index): """A hook for adding extra fields on to each form instance.""" initial_form_count = self.initial_form_count() if self.can_order: # Only pre-fill the ordering field for initial forms. if index is not None and index < initial_form_count: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_("Order"), initial=index + 1, required=False, widget=self.get_ordering_widget(), ) else: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_("Order"), required=False, widget=self.get_ordering_widget(), ) if self.can_delete and ( self.can_delete_extra or (index is not None and index < initial_form_count) ): form.fields[DELETION_FIELD_NAME] = BooleanField( label=_("Delete"), required=False, widget=self.get_deletion_widget(), ) def add_prefix(self, index): return "%s-%s" % (self.prefix, index) def is_multipart(self): """ Return True if the formset needs to be multipart, i.e. it has FileInput, or False otherwise. """ if self.forms: return self.forms[0].is_multipart() else: return self.empty_form.is_multipart() @property def media(self): # All the forms on a FormSet are the same, so you only need to # interrogate the first form for media. if self.forms: return self.forms[0].media else: return self.empty_form.media @property def template_name(self): return self.renderer.formset_template_name def get_context(self): return {"formset": self} def formset_factory( form, formset=BaseFormSet, extra=1, can_order=False, can_delete=False, max_num=None, validate_max=False, min_num=None, validate_min=False, absolute_max=None, can_delete_extra=True, renderer=None, ): """Return a FormSet for the given form class.""" if min_num is None: min_num = DEFAULT_MIN_NUM if max_num is None: max_num = DEFAULT_MAX_NUM # absolute_max is a hard limit on forms instantiated, to prevent # memory-exhaustion attacks. Default to max_num + DEFAULT_MAX_NUM # (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place). 
if absolute_max is None: absolute_max = max_num + DEFAULT_MAX_NUM if max_num > absolute_max: raise ValueError("'absolute_max' must be greater or equal to 'max_num'.") attrs = { "form": form, "extra": extra, "can_order": can_order, "can_delete": can_delete, "can_delete_extra": can_delete_extra, "min_num": min_num, "max_num": max_num, "absolute_max": absolute_max, "validate_min": validate_min, "validate_max": validate_max, "renderer": renderer or get_default_renderer(), } return type(form.__name__ + "FormSet", (formset,), attrs) def all_valid(formsets): """Validate every formset and return True if all are valid.""" # List comprehension ensures is_valid() is called for all formsets. return all([formset.is_valid() for formset in formsets])
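# ---------------------------------------------------------------------------
# A usage sketch of formset_factory() and the ManagementForm contract above:
# bound data must carry the "<prefix>-TOTAL_FORMS" / "<prefix>-INITIAL_FORMS"
# keys, and member form fields follow the "<prefix>-<index>-<field>" naming
# produced by add_prefix(). ItemForm and the settings bootstrap are
# illustrative assumptions.
import django
from django.conf import settings

if not settings.configured:
    settings.configure(USE_I18N=False)
    django.setup()

from django import forms
from django.forms import formset_factory


class ItemForm(forms.Form):
    name = forms.CharField()


ItemFormSet = formset_factory(ItemForm, extra=1, max_num=5, validate_max=True)

data = {
    "form-TOTAL_FORMS": "2",    # how many member forms were posted
    "form-INITIAL_FORMS": "0",
    "form-MIN_NUM_FORMS": "0",  # rendered for client-side code; not validated
    "form-MAX_NUM_FORMS": "5",
    "form-0-name": "spam",
    "form-1-name": "eggs",
}
formset = ItemFormSet(data)
assert formset.is_valid()
assert [f.cleaned_data["name"] for f in formset] == ["spam", "eggs"]

# Omitting the management form yields a non-form (formset-level) error.
broken = ItemFormSet({"form-0-name": "spam"})
assert not broken.is_valid()
assert broken.non_form_errors()
# ---------------------------------------------------------------------------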
castiel248/Convert
Lib/site-packages/django/forms/formsets.py
Python
mit
21,167
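# ---------------------------------------------------------------------------
# A sketch of the ordering/deletion machinery from add_fields() above:
# can_order injects an ORDER IntegerField (a blank value sorts last) and
# can_delete injects a DELETE BooleanField; forms marked for deletion are
# skipped by ordered_forms. Assumes the settings bootstrap and ItemForm
# from the previous sketch.
OrderedItemFormSet = formset_factory(
    ItemForm, can_order=True, can_delete=True, extra=0
)

data = {
    "form-TOTAL_FORMS": "2",
    "form-INITIAL_FORMS": "2",
    "form-MIN_NUM_FORMS": "0",
    "form-MAX_NUM_FORMS": "1000",
    "form-0-name": "spam",
    "form-0-ORDER": "2",
    "form-1-name": "eggs",
    "form-1-ORDER": "1",
    "form-1-DELETE": "on",
}
fs = OrderedItemFormSet(data)
assert fs.is_valid()
assert [f.cleaned_data["name"] for f in fs.deleted_forms] == ["eggs"]
assert [f.cleaned_data["name"] for f in fs.ordered_forms] == ["spam"]
# ---------------------------------------------------------------------------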
{% for name, value in attrs.items() %}{% if value is not sameas False %} {{ name }}{% if value is not sameas True %}="{{ value }}"{% endif %}{% endif %}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/attrs.html
HTML
mit
165
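# ---------------------------------------------------------------------------
# A sketch of the boolean-attribute convention the attrs.html template above
# implements: True renders a bare attribute, False suppresses the attribute,
# and any other value renders name="value". Assumes the settings bootstrap
# from the earlier sketches; exact output depends on the configured form
# renderer.
from django.forms.widgets import TextInput

widget = TextInput(attrs={"required": True, "readonly": False, "maxlength": 10})
print(widget.render("nickname", "ada"))
# Expected: <input type="text" name="nickname" value="ada" required maxlength="10">
# ---------------------------------------------------------------------------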
{% include "django/forms/table.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/default.html
HTML
mit
40
{{ errors }} {% if errors and not fields %} <div>{% for field in hidden_fields %}{{ field }}{% endfor %}</div> {% endif %} {% for field, errors in fields %} <div{% set classes = field.css_classes() %}{% if classes %} class="{{ classes }}"{% endif %}> {% if field.use_fieldset %} <fieldset> {% if field.label %}{{ field.legend_tag() }}{% endif %} {% else %} {% if field.label %}{{ field.label_tag() }}{% endif %} {% endif %} {% if field.help_text %}<div class="helptext">{{ field.help_text|safe }}</div>{% endif %} {{ errors }} {{ field }} {% if field.use_fieldset %}</fieldset>{% endif %} {% if loop.last %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </div> {% endfor %} {% if not fields and not errors %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/div.html
HTML
mit
865
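# ---------------------------------------------------------------------------
# A sketch tying the div.html layout above to the form API: BaseForm's
# as_div() helper renders through template_name_div, and hidden fields are
# folded into the last visible row (the loop.last branch). Assumes the
# settings bootstrap and ContactForm from the earlier sketches.
form = ContactForm()
html = form.as_div()
assert "<div" in html          # one wrapper element per visible field
# ---------------------------------------------------------------------------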
{% include "django/forms/errors/dict/ul.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/dict/default.html
HTML
mit
49
{% for field, errors in errors %}* {{ field }} {% for error in errors %} * {{ error }} {% endfor %}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/dict/text.txt
Text
mit
113
{% if errors %}<ul class="{{ error_class }}">{% for field, error in errors %}<li>{{ field }}{{ error }}</li>{% endfor %}</ul>{% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/dict/ul.html
HTML
mit
137
{% include "django/forms/errors/list/ul.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/list/default.html
HTML
mit
49
{% for error in errors %}* {{ error }} {% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/list/text.txt
Text
mit
52
{% if errors %}<ul class="{{ error_class }}">{% for error in errors %}<li>{{ error }}</li>{% endfor %}</ul>{% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/errors/list/ul.html
HTML
mit
119
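# ---------------------------------------------------------------------------
# A sketch of how the error-list templates above surface through ErrorList:
# str() renders list/default.html -> list/ul.html with the error_class CSS
# hook ("errorlist", plus "nonfield"/"nonform" where BaseForm and
# BaseFormSet request it). Assumes the settings bootstrap from the earlier
# sketches.
from django.forms.utils import ErrorList

errors = ErrorList(["Too short.", "Invalid character."])
print(str(errors))
# Expected: <ul class="errorlist"><li>Too short.</li><li>Invalid character.</li></ul>
# ---------------------------------------------------------------------------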
{{ formset.management_form }}{% for form in formset %}{{ form }}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/formsets/default.html
HTML
mit
77
{{ formset.management_form }}{% for form in formset %}{{ form.as_div() }}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/formsets/div.html
HTML
mit
86
{{ formset.management_form }}{% for form in formset %}{{ form.as_p() }}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/formsets/p.html
HTML
mit
84
{{ formset.management_form }}{% for form in formset %}{{ form.as_table() }}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/formsets/table.html
HTML
mit
88
{{ formset.management_form }}{% for form in formset %}{{ form.as_ul() }}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/formsets/ul.html
HTML
mit
85
{% if use_tag %}<{{ tag }}{% if attrs %}{% include 'django/forms/attrs.html' %}{% endif %}>{{ label }}</{{ tag }}>{% else %}{{ label }}{% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/label.html
HTML
mit
147
{{ errors }} {% if errors and not fields %} <p>{% for field in hidden_fields %}{{ field }}{% endfor %}</p> {% endif %} {% for field, errors in fields %} {{ errors }} <p{% set classes = field.css_classes() %}{% if classes %} class="{{ classes }}"{% endif %}> {% if field.label %}{{ field.label_tag() }}{% endif %} {{ field }} {% if field.help_text %} <span class="helptext">{{ field.help_text|safe }}</span> {% endif %} {% if loop.last %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </p> {% endfor %} {% if not fields and not errors %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/p.html
HTML
mit
673
{% if errors %} <tr> <td colspan="2"> {{ errors }} {% if not fields %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </td> </tr> {% endif %} {% for field, errors in fields %} <tr{% set classes = field.css_classes() %}{% if classes %} class="{{ classes }}"{% endif %}> <th>{% if field.label %}{{ field.label_tag() }}{% endif %}</th> <td> {{ errors }} {{ field }} {% if field.help_text %} <br> <span class="helptext">{{ field.help_text|safe }}</span> {% endif %} {% if loop.last %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </td> </tr> {% endfor %} {% if not fields and not errors %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/table.html
HTML
mit
814
{% if errors %} <li> {{ errors }} {% if not fields %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </li> {% endif %} {% for field, errors in fields %} <li{% set classes = field.css_classes() %}{% if classes %} class="{{ classes }}"{% endif %}> {{ errors }} {% if field.label %}{{ field.label_tag() }}{% endif %} {{ field }} {% if field.help_text %} <span class="helptext">{{ field.help_text|safe }}</span> {% endif %} {% if loop.last %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %} </li> {% endfor %} {% if not fields and not errors %} {% for field in hidden_fields %}{{ field }}{% endfor %} {% endif %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/ul.html
HTML
mit
712
{% for name, value in widget.attrs.items() %}{% if value is not sameas False %} {{ name }}{% if value is not sameas True %}="{{ value }}"{% endif %}{% endif %}{% endfor %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/attrs.html
HTML
mit
172
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/checkbox.html
HTML
mit
48
{% include "django/forms/widgets/input_option.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/checkbox_option.html
HTML
mit
55
{% include "django/forms/widgets/multiple_input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/checkbox_select.html
HTML
mit
57
{% if widget.is_initial %}{{ widget.initial_text }}: <a href="{{ widget.value.url }}">{{ widget.value }}</a>{% if not widget.required %} <input type="checkbox" name="{{ widget.checkbox_name }}" id="{{ widget.checkbox_id }}"{% if widget.attrs.disabled %} disabled{% endif %}> <label for="{{ widget.checkbox_id }}">{{ widget.clear_checkbox_label }}</label>{% endif %}<br> {{ widget.input_text }}:{% endif %} <input type="{{ widget.type }}" name="{{ widget.name }}"{% include "django/forms/widgets/attrs.html" %}>
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/clearable_file_input.html
HTML
mit
511
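# ---------------------------------------------------------------------------
# A sketch of the clearable-file-input contract rendered above: the clear
# checkbox is named "<field>-clear", and a checked box with no replacement
# upload makes value_from_datadict() return False ("clear the stored file"),
# while a checked box plus a new upload returns FILE_INPUT_CONTRADICTION.
# Assumes the settings bootstrap from the earlier sketches.
from django.forms.widgets import ClearableFileInput

widget = ClearableFileInput()
assert widget.clear_checkbox_name("avatar") == "avatar-clear"
assert widget.value_from_datadict(
    {"avatar-clear": "on"}, files={}, name="avatar"
) is False
# ---------------------------------------------------------------------------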
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/date.html
HTML
mit
48
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/datetime.html
HTML
mit
48
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/email.html
HTML
mit
48
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/file.html
HTML
mit
48
{% include "django/forms/widgets/input.html" %}
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/hidden.html
HTML
mit
48
<input type="{{ widget.type }}" name="{{ widget.name }}"{% if widget.value != None %} value="{{ widget.value }}"{% endif %}{% include "django/forms/widgets/attrs.html" %}>
castiel248/Convert
Lib/site-packages/django/forms/jinja2/django/forms/widgets/input.html
HTML
mit
172