
Module fl_server_api.views.inference

View Source
# SPDX-FileCopyrightText: 2024 Benedikt Franke <benedikt.franke@dlr.de>
# SPDX-FileCopyrightText: 2024 Florian Heinrich <florian.heinrich@dlr.de>
#
# SPDX-License-Identifier: Apache-2.0

import base64
from django.http import HttpRequest, HttpResponse
from drf_spectacular.utils import inline_serializer, extend_schema, OpenApiExample
import json
from io import BytesIO
import pickle
from PIL import Image
from rest_framework import status
from rest_framework.exceptions import APIException, UnsupportedMediaType, ValidationError
from rest_framework.fields import CharField, ChoiceField, DictField, FloatField, ListField
import torch
from torchvision.transforms.functional import to_tensor
from typing import Any, Dict, List, Literal, Optional, Tuple, Type

from fl_server_ai.uncertainty import get_uncertainty_class, UncertaintyBase
from fl_server_core.exceptions import TorchDeserializationException
from fl_server_core.models import Model, GlobalModel, LocalModel
from fl_server_core.utils.logging import disable_logger
from fl_server_core.utils.torch_serialization import to_torch_tensor

from .base import ViewSet
from ..serializers.generic import ErrorSerializer
from ..utils import get_entity


class Inference(ViewSet):
    """
    Inference ViewSet for performing inference on a model.
    """

    serializer_class = inline_serializer("InferenceSerializer", fields={
        "inference": ListField(child=ListField(child=FloatField())),
        "uncertainty": DictField(child=FloatField())
    })
    """The serializer for the ViewSet."""

    @extend_schema(
        request=inline_serializer(
            "InferenceJsonSerializer",
            fields={
                "model_id": CharField(),
                "model_input": ListField(child=ListField(child=FloatField())),
                "return_format": ChoiceField(["binary", "json"])
            }
        ),
        responses={
            status.HTTP_200_OK: serializer_class,
            status.HTTP_400_BAD_REQUEST: ErrorSerializer,
        },
        examples=[
            OpenApiExample("JSON Example", value={
                "model_id": "mymodel",
                "model_input": [
                    [1.0, 2.3, -0.4, 3],
                    [0.01, 9.7, 5.6, 7]
                ],
                "return_format": "json"
            }, request_only=True),
        ]
    )
    def inference(self, request: HttpRequest) -> HttpResponse:
        """
        Performs inference on the provided model and input data.

        This method takes in an HTTP request containing the necessary metadata and input data,
        performs any required preprocessing on the input data, runs the inference using the specified model,
        and returns a response in the format specified by the `return_format` parameter,
        including uncertainty measurements if an uncertainty method is configured for the model.

        Args:
            request (HttpRequest): The current HTTP request.

        Returns:
            HttpResponse: An HttpResponse containing the result of the inference as well as its uncertainty.
        """
        request_body, is_json = self._get_handle_content_type(request)
        model, preprocessing, input_shape, return_format = self._get_inference_metadata(
            request_body,
            "json" if is_json else "binary"
        )
        model_input = self._get_model_input(request, request_body)

        if preprocessing:
            model_input = preprocessing(model_input)
        else:
            # if no preprocessing is defined, at least try to convert/interpret the model_input
            # as a PyTorch tensor before raising an exception
            model_input = self._try_cast_model_input_to_tensor(model_input)
        self._validate_model_input_after_preprocessing(model_input, input_shape, bool(preprocessing))

        uncertainty_cls, inference, uncertainty = self._do_inference(model, model_input)
        return self._make_response(uncertainty_cls, inference, uncertainty, return_format)

    def _get_handle_content_type(self, request: HttpRequest) -> Tuple[dict, bool]:
        """
        Handles the HTTP request body based on its content type.

        This function checks if the request content type is either `application/json`
        or `multipart/form-data`. If it matches, it returns the corresponding data and
        a boolean indicating whether it's JSON (True) or multipart/form-data (False).

        Args:
            request (HttpRequest): The request.

        Returns:
            tuple: A tuple containing the parsed data and a boolean indicating the content type.
                * If content type is `application/json`, returns the JSON payload as a Python object (dict)
                and True to indicate it's JSON.
                * If content type is `multipart/form-data`, returns the request POST data and False.

        Raises:
            UnsupportedMediaType: If an unknown content type is specified, raising an error with
                details on supported types (`application/json` and `multipart/form-data`).
        """
        match request.content_type.lower():
            case s if s.startswith("multipart/form-data"):
                return request.POST, False
            case s if s.startswith("application/json"):
                return json.loads(request.body), True

        # if the content type is specified, but not supported, return 415
        self._logger.error(f"Unknown Content-Type '{request.content_type}'")
        raise UnsupportedMediaType(
            "Only Content-Type 'application/json' and 'multipart/form-data' is supported."
        )

    def _get_inference_metadata(
        self,
        request_body: dict,
        return_format_default: Literal["binary", "json"]
    ) -> Tuple[Model, Optional[torch.nn.Module], Optional[List[Optional[int]]], str]:
        """
        Retrieves inference metadata based on the content of the provided request body.

        This method checks if a `model_id` is present in the request body and retrieves
        the corresponding model entity. It then determines the return format based on the
        request body or defaults to one of the two supported formats (`binary` or `json`).

        Args:
            request_body (dict): The data sent with the request, containing at least `model_id`.
            return_format_default (Literal["binary", "json"]): The default return format to use if not specified in
                the request body.

        Returns:
            Tuple[Model, Optional[torch.nn.Module], Optional[List[Optional[int]]], str]: A tuple containing:
                * The retrieved model entity.
                * The global model's preprocessing torch module (if applicable).
                * The input shape of the global model (if applicable).
                * The return format (`binary` or `json`).

        Raises:
            ValidationError: If no valid `model_id` is provided in the request body, or if an unknown return format
                is specified.
        """
        if "model_id" not in request_body:
            self._logger.error("No 'model_id' provided in request.")
            raise ValidationError("No 'model_id' provided in request.")
        model_id = request_body["model_id"]
        model = get_entity(Model, pk=model_id)

        return_format = request_body.get("return_format", return_format_default)
        if return_format not in ["binary", "json"]:
            self._logger.error(f"Unknown return format '{return_format}'. Supported are binary and json.")
            raise ValidationError(f"Unknown return format '{return_format}'. Supported are binary and json.")

        global_model: Optional[GlobalModel] = None
        if isinstance(model, GlobalModel):
            global_model = model
        elif isinstance(model, LocalModel):
            global_model = model.base_model
        else:
            self._logger.error("Unknown model type. Not a GlobalModel and not a LocalModel. Skip preprocessing.")

        preprocessing: Optional[torch.nn.Module] = None
        input_shape: Optional[List[Optional[int]]] = None
        if global_model:
            if global_model.preprocessing is not None:
                preprocessing = global_model.get_preprocessing_torch_model()
            if global_model.input_shape is not None:
                input_shape = global_model.input_shape

        return model, preprocessing, input_shape, return_format

    def _get_model_input(self, request: HttpRequest, request_body: dict) -> Any:
        """
        Retrieves and decodes the model input from either an uploaded file or the request body.

        Args:
            request (HttpRequest): The current HTTP request.
            request_body (dict): The parsed request body as a dictionary.

        Returns:
            Any: The decoded model input data.

        Raises:
            ValidationError: If no `model_input` is found in the uploaded file or the request body.
        """
        uploaded_file = request.FILES.get("model_input", None)
        if uploaded_file and uploaded_file.file:
            model_input = uploaded_file.file.read()
        else:
            model_input = request_body.get("model_input", None)
        if not model_input:
            raise ValidationError("No uploaded file 'model_input' found.")
        return self._try_decode_model_input(model_input)

    def _try_decode_model_input(self, model_input: Any) -> Any:
        """
        Attempts to decode the input `model_input` from various formats and returns it in a usable form.

        This function first tries to deserialize the input as a PyTorch tensor. If that fails, it attempts to
        decode the input as a base64-encoded string. If neither attempt is successful, the original input is returned.

        Args:
            model_input (Any): The input to be decoded, which can be in any format.

        Returns:
            Any: The decoded input, which may still be in an unknown format if decoding attempts fail.
        """
        # 1. try to deserialize model_input as PyTorch tensor
        try:
            with disable_logger(self._logger):
                model_input = to_torch_tensor(model_input)
        except Exception:
            pass
        # 2. try to decode model_input as base64
        try:
            is_base64, tmp_model_input = self._is_base64(model_input)
            if is_base64:
                model_input = tmp_model_input
        except Exception:
            pass
        # result
        return model_input

    def _try_cast_model_input_to_tensor(self, model_input: Any) -> Any:
        """
        Attempt to cast the given model input to a PyTorch tensor.

        This function tries to interpret the input in several formats:

        1. PIL Image (and later convert it to a PyTorch tensor, see 3.)
        2. PyTorch tensor via `torch.as_tensor`
        3. PyTorch tensor via torchvision `ToTensor` (supports e.g. PIL images)

        If none of these attempts are successful, the original input is returned.

        Args:
            model_input: The input data to be cast to a PyTorch tensor.
                Can be any type that can be converted to a tensor.

        Returns:
            A PyTorch tensor representation of the input data, or the original
            input if it cannot be converted.
        """
        def _try_to_pil_image(model_input: Any) -> Any:
            stream = BytesIO(model_input)
            return Image.open(stream)

        if isinstance(model_input, torch.Tensor):
            return model_input

        # In the following order, try to:
        # 1. interpret model_input as PIL image (and later to PyTorch tensor, see step 3),
        # 2. interpret model_input as PyTorch tensor,
        # 3. interpret model_input as PyTorch tensor via torchvision ToTensor (supports e.g. PIL images).
        for fn in [_try_to_pil_image, torch.as_tensor, to_tensor]:
            try:
                model_input = fn(model_input)  # type: ignore
            except Exception:
                pass
        return model_input

    def _is_base64(self, sb: str | bytes) -> Tuple[bool, bytes]:
        """
        Check if a string or bytes object is a valid Base64 encoded string.

        This function checks if the input can be decoded and re-encoded without any changes.
        If decoding and re-encoding return the same result as the original input, it's likely
        that the input was indeed a valid Base64 encoded string.

        Note: This code is based on the reference implementation from the linked Stack Overflow answer.

        Args:
            sb (str | bytes): The input string or bytes object to check.

        Returns:
            Tuple[bool, bytes]: A tuple containing a boolean indicating whether the input is
                a valid Base64 encoded string and the decoded bytes if it is.

        References:
            https://stackoverflow.com/a/45928164
        """
        try:
            if isinstance(sb, str):
                # If there's any unicode here, an exception will be thrown and the function will return false
                sb_bytes = bytes(sb, "ascii")
            elif isinstance(sb, bytes):
                sb_bytes = sb
            else:
                raise ValueError("Argument must be string or bytes")
            decoded = base64.b64decode(sb_bytes)
            return base64.b64encode(decoded) == sb_bytes, decoded
        except Exception:
            return False, b""

    def _validate_model_input_after_preprocessing(
        self,
        model_input: Any,
        model_input_shape: Optional[List[Optional[int]]],
        preprocessing: bool
    ) -> None:
        """
        Validates the model input after preprocessing.

        Ensures that the provided `model_input` is a valid PyTorch tensor and its shape matches
        the expected `model_input_shape`.

        Args:
            model_input (Any): The model input to be validated.
            model_input_shape (Optional[List[Optional[int]]]): The expected shape of the model input.
                Can contain None values if not all dimensions are fixed (e.g. first dimension as batch size).
            preprocessing (bool): Whether a preprocessing model was defined or not. (Only for a better error message.)

        Raises:
            ValidationError: If the `model_input` is not a valid PyTorch tensor or
                its shape does not match the expected `model_input_shape`.
        """
        if not isinstance(model_input, torch.Tensor):
            msg = "Model input could not be casted or interpreted as a PyTorch tensor object"
            if preprocessing:
                msg += " and is still not a PyTorch tensor after preprecessing."
            else:
                msg += " and no preprecessing is defined."
            raise ValidationError(msg)

        if model_input_shape and not all(
            dim_input == dim_model
            for (dim_input, dim_model) in zip(model_input.shape, model_input_shape)
            if dim_model is not None
        ):
            raise ValidationError("Input shape does not match model input shape.")

    def _make_response(
        self,
        uncertainty_cls: Type[UncertaintyBase],
        inference: torch.Tensor,
        uncertainty: Any,
        return_type: str
    ) -> HttpResponse:
        """
        Build the response object with the result data.

        This method checks the return type and makes a response with the appropriate content type.

        If return_type is "binary", a binary-encoded response will be generated using pickle.
        Otherwise, a JSON response will be generated by serializing the uncertainty object using its to_json method.

        Args:
            uncertainty_cls (Type[UncertaintyBase]): The uncertainty class.
            inference (torch.Tensor): The inference.
            uncertainty (Any): The uncertainty.
            return_type (str): The return type.

        Returns:
            HttpResponse: The inference result response.
        """
        if return_type == "binary":
            response_bytes = pickle.dumps(dict(inference=inference, uncertainty=uncertainty))
            return HttpResponse(response_bytes, content_type="application/octet-stream")

        return HttpResponse(uncertainty_cls.to_json(inference, uncertainty), content_type="application/json")

    def _do_inference(
        self, model: Model, input_tensor: torch.Tensor
    ) -> Tuple[Type[UncertaintyBase], torch.Tensor, Dict[str, Any]]:
        """
        Perform inference on a given input tensor using the provided model.

        This method retrieves the uncertainty class and performs the prediction.
        The output of this method consists of:

        * The uncertainty class used for inference
        * The result of the model's prediction on the input tensor
        * Any associated uncertainty for the prediction

        Args:
            model (Model): The model to perform inference with.
            input_tensor (torch.Tensor): Input tensor to pass through the model.

        Returns:
            Tuple[Type[UncertaintyBase], torch.Tensor, Dict[str, Any]]:
                A tuple containing the uncertainty class, prediction result, and any associated uncertainty.

        Raises:
            APIException: If an error occurs during inference.
        """
        try:
            uncertainty_cls = get_uncertainty_class(model)
            inference, uncertainty = uncertainty_cls.prediction(input_tensor, model)
            return uncertainty_cls, inference, uncertainty
        except TorchDeserializationException as e:
            raise APIException(e) from e
        except Exception as e:
            self._logger.error(e)
            raise APIException("Internal Server Error occurred during inference!") from e

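Example usage

The snippet below is a minimal client-side sketch of calling this endpoint with the requests library. The URL path, host, and authentication setup are illustrative assumptions; the actual route is defined by the project's URL configuration, not by this module.

    # Hypothetical client calls; the endpoint URL is an assumption for illustration.
    import pickle
    import requests

    URL = "http://localhost:8000/api/inference/"  # assumed route to Inference.inference

    # JSON request with a JSON response ("json" is the default for application/json requests)
    response = requests.post(URL, json={
        "model_id": "mymodel",
        "model_input": [[1.0, 2.3, -0.4, 3.0], [0.01, 9.7, 5.6, 7.0]],
        "return_format": "json",
    })
    response.raise_for_status()
    result = response.json()  # {"inference": [[...]], "uncertainty": {...}}

    # JSON request with a pickled binary response
    response = requests.post(URL, json={
        "model_id": "mymodel",
        "model_input": [[1.0, 2.3, -0.4, 3.0], [0.01, 9.7, 5.6, 7.0]],
        "return_format": "binary",
    })
    payload = pickle.loads(response.content)  # dict with "inference" and "uncertainty"
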
Classes

Inference

class Inference(
    **kwargs
)

Inference ViewSet for performing inference on a model.

View Source
class Inference(ViewSet):
    """
    Inference ViewSet for performing inference on a model.
    """

    serializer_class = inline_serializer("InferenceSerializer", fields={
        "inference": ListField(child=ListField(child=FloatField())),
        "uncertainty": DictField(child=FloatField())
    })
    """The serializer for the ViewSet."""

    @extend_schema(
        request=inline_serializer(
            "InferenceJsonSerializer",
            fields={
                "model_id": CharField(),
                "model_input": ListField(child=ListField(child=FloatField())),
                "return_format": ChoiceField(["binary", "json"])
            }
        ),
        responses={
            status.HTTP_200_OK: serializer_class,
            status.HTTP_400_BAD_REQUEST: ErrorSerializer,
        },
        examples=[
            OpenApiExample("JSON Example", value={
                "model_id": "mymodel",
                "model_input": [
                    [1.0, 2.3, -0.4, 3],
                    [0.01, 9.7, 5.6, 7]
                ],
                "return_format": "json"
            }, request_only=True),
        ]
    )
    def inference(self, request: HttpRequest) -> HttpResponse:
        """
        Performs inference on the provided model and input data.

        This method takes in an HTTP request containing the necessary metadata and input data,
        performs any required preprocessing on the input data, runs the inference using the specified model,
        and returns a response in the format specified by the `return_format` parameter,
        including uncertainty measurements if an uncertainty method is configured for the model.

        Args:
            request (HttpRequest): The current HTTP request.

        Returns:
            HttpResponse: An HttpResponse containing the result of the inference as well as its uncertainty.
        """
        request_body, is_json = self._get_handle_content_type(request)
        model, preprocessing, input_shape, return_format = self._get_inference_metadata(
            request_body,
            "json" if is_json else "binary"
        )
        model_input = self._get_model_input(request, request_body)

        if preprocessing:
            model_input = preprocessing(model_input)
        else:
            # if no preprocessing is defined, at least try to convert/interpret the model_input
            # as a PyTorch tensor before raising an exception
            model_input = self._try_cast_model_input_to_tensor(model_input)
        self._validate_model_input_after_preprocessing(model_input, input_shape, bool(preprocessing))

        uncertainty_cls, inference, uncertainty = self._do_inference(model, model_input)
        return self._make_response(uncertainty_cls, inference, uncertainty, return_format)

    def _get_handle_content_type(self, request: HttpRequest) -> Tuple[dict, bool]:
        """
        Handles the HTTP request body based on its content type.

        This function checks if the request content type is either `application/json`
        or `multipart/form-data`. If it matches, it returns the corresponding data and
        a boolean indicating whether it's JSON (True) or multipart/form-data (False).

        Args:
            request (HttpRequest): The request.

        Returns:
            tuple: A tuple containing the parsed data and a boolean indicating the content type.
                * If content type is `application/json`, returns the JSON payload as a Python object (dict)
                and True to indicate it's JSON.
                * If content type is `multipart/form-data`, returns the request POST data and False.

        Raises:
            UnsupportedMediaType: If an unknown content type is specified, raising an error with
                details on supported types (`application/json` and `multipart/form-data`).
        """
        match request.content_type.lower():
            case s if s.startswith("multipart/form-data"):
                return request.POST, False
            case s if s.startswith("application/json"):
                return json.loads(request.body), True

        # if the content type is specified, but not supported, return 415
        self._logger.error(f"Unknown Content-Type '{request.content_type}'")
        raise UnsupportedMediaType(
            "Only Content-Type 'application/json' and 'multipart/form-data' is supported."
        )

    def _get_inference_metadata(
        self,
        request_body: dict,
        return_format_default: Literal["binary", "json"]
    ) -> Tuple[Model, Optional[torch.nn.Module], Optional[List[Optional[int]]], str]:
        """
        Retrieves inference metadata based on the content of the provided request body.

        This method checks if a `model_id` is present in the request body and retrieves
        the corresponding model entity. It then determines the return format based on the
        request body or defaults to one of the two supported formats (`binary` or `json`).

        Args:
            request_body (dict): The data sent with the request, containing at least `model_id`.
            return_format_default (Literal["binary", "json"]): The default return format to use if not specified in
                the request body.

        Returns:
            Tuple[Model, Optional[torch.nn.Module], Optional[List[Optional[int]]], str]: A tuple containing:
                * The retrieved model entity.
                * The global model's preprocessing torch module (if applicable).
                * The input shape of the global model (if applicable).
                * The return format (`binary` or `json`).

        Raises:
            ValidationError: If no valid `model_id` is provided in the request body, or if an unknown return format
                is specified.
        """
        if "model_id" not in request_body:
            self._logger.error("No 'model_id' provided in request.")
            raise ValidationError("No 'model_id' provided in request.")
        model_id = request_body["model_id"]
        model = get_entity(Model, pk=model_id)

        return_format = request_body.get("return_format", return_format_default)
        if return_format not in ["binary", "json"]:
            self._logger.error(f"Unknown return format '{return_format}'. Supported are binary and json.")
            raise ValidationError(f"Unknown return format '{return_format}'. Supported are binary and json.")

        global_model: Optional[GlobalModel] = None
        if isinstance(model, GlobalModel):
            global_model = model
        elif isinstance(model, LocalModel):
            global_model = model.base_model
        else:
            self._logger.error("Unknown model type. Not a GlobalModel and not a LocalModel. Skip preprocessing.")

        preprocessing: Optional[torch.nn.Module] = None
        input_shape: Optional[List[Optional[int]]] = None
        if global_model:
            if global_model.preprocessing is not None:
                preprocessing = global_model.get_preprocessing_torch_model()
            if global_model.input_shape is not None:
                input_shape = global_model.input_shape

        return model, preprocessing, input_shape, return_format

    def _get_model_input(self, request: HttpRequest, request_body: dict) -> Any:
        """
        Retrieves and decodes the model input from either an uploaded file or the request body.

        Args:
            request (HttpRequest): The current HTTP request.
            request_body (dict): The parsed request body as a dictionary.

        Returns:
            Any: The decoded model input data.

        Raises:
            ValidationError: If no `model_input` is found in the uploaded file or the request body.
        """
        uploaded_file = request.FILES.get("model_input", None)
        if uploaded_file and uploaded_file.file:
            model_input = uploaded_file.file.read()
        else:
            model_input = request_body.get("model_input", None)
        if not model_input:
            raise ValidationError("No uploaded file 'model_input' found.")
        return self._try_decode_model_input(model_input)

    def _try_decode_model_input(self, model_input: Any) -> Any:
        """
        Attempts to decode the input `model_input` from various formats and returns it in a usable form.

        This function first tries to deserialize the input as a PyTorch tensor. If that fails, it attempts to
        decode the input as a base64-encoded string. If neither attempt is successful, the original input is returned.

        Args:
            model_input (Any): The input to be decoded, which can be in any format.

        Returns:
            Any: The decoded input, which may still be in an unknown format if decoding attempts fail.
        """
        # 1. try to deserialize model_input as PyTorch tensor
        try:
            with disable_logger(self._logger):
                model_input = to_torch_tensor(model_input)
        except Exception:
            pass
        # 2. try to decode model_input as base64
        try:
            is_base64, tmp_model_input = self._is_base64(model_input)
            if is_base64:
                model_input = tmp_model_input
        except Exception:
            pass
        # result
        return model_input

    def _try_cast_model_input_to_tensor(self, model_input: Any) -> Any:
        """
        Attempt to cast the given model input to a PyTorch tensor.

        This function tries to interpret the input in several formats:

        1. PIL Image (and later convert it to a PyTorch tensor, see 3.)
        2. PyTorch tensor via `torch.as_tensor`
        3. PyTorch tensor via torchvision `ToTensor` (supports e.g. PIL images)

        If none of these attempts are successful, the original input is returned.

        Args:
            model_input: The input data to be cast to a PyTorch tensor.
                Can be any type that can be converted to a tensor.

        Returns:
            A PyTorch tensor representation of the input data, or the original
            input if it cannot be converted.
        """
        def _try_to_pil_image(model_input: Any) -> Any:
            stream = BytesIO(model_input)
            return Image.open(stream)

        if isinstance(model_input, torch.Tensor):
            return model_input

        # In the following order, try to:
        # 1. interpret model_input as PIL image (and later to PyTorch tensor, see step 3),
        # 2. interpret model_input as PyTorch tensor,
        # 3. interpret model_input as PyTorch tensor via torchvision ToTensor (supports e.g. PIL images).
        for fn in [_try_to_pil_image, torch.as_tensor, to_tensor]:
            try:
                model_input = fn(model_input)  # type: ignore
            except Exception:
                pass
        return model_input

    def _is_base64(self, sb: str | bytes) -> Tuple[bool, bytes]:
        """
        Check if a string or bytes object is a valid Base64 encoded string.

        This function checks if the input can be decoded and re-encoded without any changes.
        If decoding and re-encoding return the same result as the original input, it's likely
        that the input was indeed a valid Base64 encoded string.

        Note: This code is based on the reference implementation from the linked Stack Overflow answer.

        Args:
            sb (str | bytes): The input string or bytes object to check.

        Returns:
            Tuple[bool, bytes]: A tuple containing a boolean indicating whether the input is
                a valid Base64 encoded string and the decoded bytes if it is.

        References:
            https://stackoverflow.com/a/45928164
        """
        try:
            if isinstance(sb, str):
                # If there's any unicode here, an exception will be thrown and the function will return false
                sb_bytes = bytes(sb, "ascii")
            elif isinstance(sb, bytes):
                sb_bytes = sb
            else:
                raise ValueError("Argument must be string or bytes")
            decoded = base64.b64decode(sb_bytes)
            return base64.b64encode(decoded) == sb_bytes, decoded
        except Exception:
            return False, b""

    def _validate_model_input_after_preprocessing(
        self,
        model_input: Any,
        model_input_shape: Optional[List[Optional[int]]],
        preprocessing: bool
    ) -> None:
        """
        Validates the model input after preprocessing.

        Ensures that the provided `model_input` is a valid PyTorch tensor and its shape matches
        the expected `model_input_shape`.

        Args:
            model_input (Any): The model input to be validated.
            model_input_shape (Optional[List[Optional[int]]]): The expected shape of the model input.
                Can contain None values if not all dimensions are fixed (e.g. first dimension as batch size).
            preprocessing (bool): Whether a preprocessing model was defined or not. (Only for a better error message.)

        Raises:
            ValidationError: If the `model_input` is not a valid PyTorch tensor or
                its shape does not match the expected `model_input_shape`.
        """
        if not isinstance(model_input, torch.Tensor):
            msg = "Model input could not be casted or interpreted as a PyTorch tensor object"
            if preprocessing:
                msg += " and is still not a PyTorch tensor after preprecessing."
            else:
                msg += " and no preprecessing is defined."
            raise ValidationError(msg)

        if model_input_shape and not all(
            dim_input == dim_model
            for (dim_input, dim_model) in zip(model_input.shape, model_input_shape)
            if dim_model is not None
        ):
            raise ValidationError("Input shape does not match model input shape.")

    def _make_response(
        self,
        uncertainty_cls: Type[UncertaintyBase],
        inference: torch.Tensor,
        uncertainty: Any,
        return_type: str
    ) -> HttpResponse:
        """
        Build the response object with the result data.

        This method checks the return type and makes a response with the appropriate content type.

        If return_type is "binary", a binary-encoded response will be generated using pickle.
        Otherwise, a JSON response will be generated by serializing the uncertainty object using its to_json method.

        Args:
            uncertainty_cls (Type[UncertaintyBase]): The uncertainty class.
            inference (torch.Tensor): The inference.
            uncertainty (Any): The uncertainty.
            return_type (str): The return type.

        Returns:
            HttpResponse: The inference result response.
        """
        if return_type == "binary":
            response_bytes = pickle.dumps(dict(inference=inference, uncertainty=uncertainty))
            return HttpResponse(response_bytes, content_type="application/octet-stream")

        return HttpResponse(uncertainty_cls.to_json(inference, uncertainty), content_type="application/json")

    def _do_inference(
        self, model: Model, input_tensor: torch.Tensor
    ) -> Tuple[Type[UncertaintyBase], torch.Tensor, Dict[str, Any]]:
        """
        Perform inference on a given input tensor using the provided model.

        This method retrieves the uncertainty class and performs the prediction.
        The output of this method consists of:

        * The uncertainty class used for inference
        * The result of the model's prediction on the input tensor
        * Any associated uncertainty for the prediction

        Args:
            model (Model): The model to perform inference with.
            input_tensor (torch.Tensor): Input tensor to pass through the model.

        Returns:
            Tuple[Type[UncertaintyBase], torch.Tensor, Dict[str, Any]]:
                A tuple containing the uncertainty class, prediction result, and any associated uncertainty.

        Raises:
            APIException: If an error occurs during inference.
        """
        try:
            uncertainty_cls = get_uncertainty_class(model)
            inference, uncertainty = uncertainty_cls.prediction(input_tensor, model)
            return uncertainty_cls, inference, uncertainty
        except TorchDeserializationException as e:
            raise APIException(e) from e
        except Exception as e:
            self._logger.error(e)
            raise APIException("Internal Server Error occurred during inference!") from e

Ancestors (in MRO)

  • fl_server_api.views.base.ViewSet
  • rest_framework.viewsets.ViewSet
  • rest_framework.viewsets.ViewSetMixin
  • rest_framework.views.APIView
  • django.views.generic.base.View

Class variables

authentication_classes
basename
content_negotiation_class
description
detail
http_method_names
metadata_class
name
parser_classes
permission_classes
renderer_classes
schema
serializer_class

The serializer for the ViewSet.

settings
suffix
throttle_classes
versioning_class

Static methods

as_view

def as_view(
    actions=None,
    **initkwargs
)

Because of the way class based views create a closure around the instantiated view, we need to totally reimplement .as_view, and slightly modify the view function that is created and returned.

View Source
    @classonlymethod
    def as_view(cls, actions=None, **initkwargs):
        """
        Because of the way class based views create a closure around the
        instantiated view, we need to totally reimplement `.as_view`,
        and slightly modify the view function that is created and returned.
        """
        # The name and description initkwargs may be explicitly overridden for
        # certain route configurations. eg, names of extra actions.
        cls.name = None
        cls.description = None

        # The suffix initkwarg is reserved for displaying the viewset type.
        # This initkwarg should have no effect if the name is provided.
        # eg. 'List' or 'Instance'.
        cls.suffix = None

        # The detail initkwarg is reserved for introspecting the viewset type.
        cls.detail = None

        # Setting a basename allows a view to reverse its action urls. This
        # value is provided by the router through the initkwargs.
        cls.basename = None

        # actions must not be empty
        if not actions:
            raise TypeError("The `actions` argument must be provided when "
                            "calling `.as_view()` on a ViewSet. For example "
                            "`.as_view({'get': 'list'})`")

        # sanitize keyword arguments
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError("You tried to pass in the %s method name as a "
                                "keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            if not hasattr(cls, key):
                raise TypeError("%s() received an invalid keyword %r" % (
                    cls.__name__, key))

        # name and suffix are mutually exclusive
        if 'name' in initkwargs and 'suffix' in initkwargs:
            raise TypeError("%s() received both `name` and `suffix`, which are "
                            "mutually exclusive arguments." % (cls.__name__))

        def view(request, *args, **kwargs):
            self = cls(**initkwargs)

            if 'get' in actions and 'head' not in actions:
                actions['head'] = actions['get']

            # We also store the mapping of request methods to actions,
            # so that we can later set the action attribute.
            # eg. `self.action = 'list'` on an incoming GET request.
            self.action_map = actions

            # Bind methods to actions
            # This is the bit that's different to a standard view
            for method, action in actions.items():
                handler = getattr(self, action)
                setattr(self, method, handler)

            self.request = request
            self.args = args
            self.kwargs = kwargs

            # And continue as usual
            return self.dispatch(request, *args, **kwargs)

        # take name and docstring from class
        update_wrapper(view, cls, updated=())

        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())

        # We need to set these on the view function, so that breadcrumb
        # generation can pick out these bits of information from a
        # resolved URL.
        view.cls = cls
        view.initkwargs = initkwargs
        view.actions = actions
        return csrf_exempt(view)

get_extra_actions

def get_extra_actions()

Get the methods that are marked as an extra ViewSet @action.

View Source
    @classmethod
    def get_extra_actions(cls):
        """
        Get the methods that are marked as an extra ViewSet `@action`.
        """
        return [_check_attr_name(method, name)
                for name, method
                in getmembers(cls, _is_extra_action)]

Instance variables

allowed_methods

Wrap Django's private _allowed_methods interface in a public property.

default_response_headers

Methods

check_object_permissions

def check_object_permissions(
    self,
    request,
    obj
)

Check if the request should be permitted for a given object.

Raises an appropriate exception if the request is not permitted.

View Source
    def check_object_permissions(self, request, obj):
        """
        Check if the request should be permitted for a given object.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_object_permission(request, self, obj):
                self.permission_denied(
                    request,
                    message=getattr(permission, 'message', None),
                    code=getattr(permission, 'code', None)
                )

check_permissions

def check_permissions(
    self,
    request
)

Check if the request should be permitted.

Raises an appropriate exception if the request is not permitted.

View Source
    def check_permissions(self, request):
        """
        Check if the request should be permitted.
        Raises an appropriate exception if the request is not permitted.
        """
        for permission in self.get_permissions():
            if not permission.has_permission(request, self):
                self.permission_denied(
                    request,
                    message=getattr(permission, 'message', None),
                    code=getattr(permission, 'code', None)
                )

check_throttles

def check_throttles(
    self,
    request
)

Check if request should be throttled.

Raises an appropriate exception if the request is throttled.

View Source
    def check_throttles(self, request):
        """
        Check if request should be throttled.
        Raises an appropriate exception if the request is throttled.
        """
        throttle_durations = []
        for throttle in self.get_throttles():
            if not throttle.allow_request(request, self):
                throttle_durations.append(throttle.wait())

        if throttle_durations:
            # Filter out `None` values which may happen in case of config / rate
            # changes, see #1438
            durations = [
                duration for duration in throttle_durations
                if duration is not None
            ]

            duration = max(durations, default=None)
            self.throttled(request, duration)

determine_version

def determine_version(
    self,
    request,
    *args,
    **kwargs
)

If versioning is being used, then determine any API version for the incoming request. Returns a two-tuple of (version, versioning_scheme).

View Source
    def determine_version(self, request, *args, **kwargs):
        """
        If versioning is being used, then determine any API version for the
        incoming request. Returns a two-tuple of (version, versioning_scheme)
        """
        if self.versioning_class is None:
            return (None, None)
        scheme = self.versioning_class()
        return (scheme.determine_version(request, *args, **kwargs), scheme)

dispatch

def dispatch(
    self,
    request,
    *args,
    **kwargs
)

.dispatch() is pretty much the same as Django's regular dispatch, but with extra hooks for startup, finalize, and exception handling.

View Source
    def dispatch(self, request, *args, **kwargs):
        """
        `.dispatch()` is pretty much the same as Django's regular dispatch,
        but with extra hooks for startup, finalize, and exception handling.
        """
        self.args = args
        self.kwargs = kwargs
        request = self.initialize_request(request, *args, **kwargs)
        self.request = request
        self.headers = self.default_response_headers  # deprecate?

        try:
            self.initial(request, *args, **kwargs)

            # Get the appropriate handler method
            if request.method.lower() in self.http_method_names:
                handler = getattr(self, request.method.lower(),
                                  self.http_method_not_allowed)
            else:
                handler = self.http_method_not_allowed

            response = handler(request, *args, **kwargs)

        except Exception as exc:
            response = self.handle_exception(exc)

        self.response = self.finalize_response(request, response, *args, **kwargs)
        return self.response

finalize_response

def finalize_response(
    self,
    request,
    response,
    *args,
    **kwargs
)

Returns the final response object.

View Source
    def finalize_response(self, request, response, *args, **kwargs):
        """
        Returns the final response object.
        """
        # Make the error obvious if a proper response is not returned
        assert isinstance(response, HttpResponseBase), (
            'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '
            'to be returned from the view, but received a `%s`'
            % type(response)
        )

        if isinstance(response, Response):
            if not getattr(request, 'accepted_renderer', None):
                neg = self.perform_content_negotiation(request, force=True)
                request.accepted_renderer, request.accepted_media_type = neg

            response.accepted_renderer = request.accepted_renderer
            response.accepted_media_type = request.accepted_media_type
            response.renderer_context = self.get_renderer_context()

        # Add new vary headers to the response instead of overwriting.
        vary_headers = self.headers.pop('Vary', None)
        if vary_headers is not None:
            patch_vary_headers(response, cc_delim_re.split(vary_headers))

        for key, value in self.headers.items():
            response[key] = value

        return response

get_authenticate_header

def get_authenticate_header(
    self,
    request
)

If a request is unauthenticated, determine the WWW-Authenticate header to use for 401 responses, if any.

View Source
    def get_authenticate_header(self, request):
        """
        If a request is unauthenticated, determine the WWW-Authenticate
        header to use for 401 responses, if any.
        """
        authenticators = self.get_authenticators()
        if authenticators:
            return authenticators[0].authenticate_header(request)

get_authenticators

def get_authenticators(
    self
)

Get the authenticators for the ViewSet.

This method gets the view method and, if it has authentication classes defined via the decorator, returns them. Otherwise, it falls back to the default authenticators.

Returns:

    list: The authenticators for the ViewSet.
View Source
    def get_authenticators(self):
        """
        Get the authenticators for the ViewSet.

        This method gets the view method and, if it has authentication classes defined via the decorator, returns them.
        Otherwise, it falls back to the default authenticators.

        Returns:
            list: The authenticators for the ViewSet.
        """
        if method := self._get_view_method():
            if hasattr(method, "authentication_classes"):
                return method.authentication_classes
        return super().get_authenticators()

get_content_negotiator

def get_content_negotiator(
    self
)

Instantiate and return the content negotiation class to use.

View Source
    def get_content_negotiator(self):
        """
        Instantiate and return the content negotiation class to use.
        """
        if not getattr(self, '_negotiator', None):
            self._negotiator = self.content_negotiation_class()
        return self._negotiator

get_exception_handler

def get_exception_handler(
    self
)

Returns the exception handler that this view uses.

View Source
    def get_exception_handler(self):
        """
        Returns the exception handler that this view uses.
        """
        return self.settings.EXCEPTION_HANDLER

get_exception_handler_context

def get_exception_handler_context(
    self
)

Returns a dict that is passed through to EXCEPTION_HANDLER, as the context argument.

View Source
    def get_exception_handler_context(self):
        """
        Returns a dict that is passed through to EXCEPTION_HANDLER,
        as the `context` argument.
        """
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }

get_extra_action_url_map

def get_extra_action_url_map(
    self
)

Build a map of {names: urls} for the extra actions.

This method will noop if detail was not provided as a view initkwarg.

View Source
    def get_extra_action_url_map(self):
        """
        Build a map of {names: urls} for the extra actions.

        This method will noop if `detail` was not provided as a view initkwarg.
        """
        action_urls = OrderedDict()

        # exit early if `detail` has not been provided
        if self.detail is None:
            return action_urls

        # filter for the relevant extra actions
        actions = [
            action for action in self.get_extra_actions()
            if action.detail == self.detail
        ]

        for action in actions:
            try:
                url_name = '%s-%s' % (self.basename, action.url_name)
                namespace = self.request.resolver_match.namespace
                if namespace:
                    url_name = '%s:%s' % (namespace, url_name)

                url = reverse(url_name, self.args, self.kwargs, request=self.request)
                view = self.__class__(**action.kwargs)
                action_urls[view.get_view_name()] = url
            except NoReverseMatch:
                pass  # URL requires additional arguments, ignore

        return action_urls

get_format_suffix

def get_format_suffix(
    self,
    **kwargs
)

Determine if the request includes a '.json' style format suffix

View Source
    def get_format_suffix(self, **kwargs):
        """
        Determine if the request includes a '.json' style format suffix
        """
        if self.settings.FORMAT_SUFFIX_KWARG:
            return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)

get_parser_context

def get_parser_context(
    self,
    http_request
)

Returns a dict that is passed through to Parser.parse(), as the parser_context keyword argument.

View Source
    def get_parser_context(self, http_request):
        """
        Returns a dict that is passed through to Parser.parse(),
        as the `parser_context` keyword argument.
        """
        # Note: Additionally `request` and `encoding` will also be added
        #       to the context by the Request object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {})
        }

get_parsers

def get_parsers(
    self
)

Instantiates and returns the list of parsers that this view can use.

View Source
    def get_parsers(self):
        """
        Instantiates and returns the list of parsers that this view can use.
        """
        return [parser() for parser in self.parser_classes]

get_permissions

def get_permissions(
    self
)

Get the permissions for the ViewSet.

This method gets the view method and, if it has permission classes defined via the decorator, returns them. Otherwise, it falls back to the default permissions.

Returns:

    list: The permissions for the ViewSet.
View Source
    def get_permissions(self):
        """
        Get the permissions for the ViewSet.

        This method gets the view method and, if it has permission classes defined via the decorator, returns them.
        Otherwise, it falls back to the default permissions.

        Returns:
            list: The permissions for the ViewSet.
        """
        if method := self._get_view_method():
            if hasattr(method, "permission_classes"):
                return method.permission_classes
        return super().get_permissions()

get_renderer_context

def get_renderer_context(
    self
)

Returns a dict that is passed through to Renderer.render(), as the renderer_context keyword argument.

View Source
    def get_renderer_context(self):
        """
        Returns a dict that is passed through to Renderer.render(),
        as the `renderer_context` keyword argument.
        """
        # Note: Additionally 'response' will also be added to the context,
        #       by the Response object.
        return {
            'view': self,
            'args': getattr(self, 'args', ()),
            'kwargs': getattr(self, 'kwargs', {}),
            'request': getattr(self, 'request', None)
        }

get_renderers

def get_renderers(
    self
)

Instantiates and returns the list of renderers that this view can use.

View Source
    def get_renderers(self):
        """
        Instantiates and returns the list of renderers that this view can use.
        """
        return [renderer() for renderer in self.renderer_classes]

get_throttles

def get_throttles(
    self
)

Instantiates and returns the list of throttles that this view uses.

View Source
    def get_throttles(self):
        """
        Instantiates and returns the list of throttles that this view uses.
        """
        return [throttle() for throttle in self.throttle_classes]

get_view_description

def get_view_description(
    self,
    html=False
)

Return some descriptive text for the view, as used in OPTIONS responses and in the browsable API.

View Source
    def get_view_description(self, html=False):
        """
        Return some descriptive text for the view, as used in OPTIONS responses
        and in the browsable API.
        """
        func = self.settings.VIEW_DESCRIPTION_FUNCTION
        return func(self, html)

get_view_name

def get_view_name(
    self
)

Return the view name, as used in OPTIONS responses and in the browsable API.

View Source
    def get_view_name(self):
        """
        Return the view name, as used in OPTIONS responses and in the
        browsable API.
        """
        func = self.settings.VIEW_NAME_FUNCTION
        return func(self)

handle_exception

def handle_exception(
    self,
    exc
)

Handle any exception that occurs, by returning an appropriate response, or re-raising the error.

View Source
    def handle_exception(self, exc):
        """
        Handle any exception that occurs, by returning an appropriate response,
        or re-raising the error.
        """
        if isinstance(exc, (exceptions.NotAuthenticated,
                            exceptions.AuthenticationFailed)):
            # WWW-Authenticate header for 401 responses, else coerce to 403
            auth_header = self.get_authenticate_header(self.request)

            if auth_header:
                exc.auth_header = auth_header
            else:
                exc.status_code = status.HTTP_403_FORBIDDEN

        exception_handler = self.get_exception_handler()

        context = self.get_exception_handler_context()
        response = exception_handler(exc, context)

        if response is None:
            self.raise_uncaught_exception(exc)

        response.exception = True
        return response

http_method_not_allowed

def http_method_not_allowed(
    self,
    request,
    *args,
    **kwargs
)

If request.method does not correspond to a handler method, determine what kind of exception to raise.

View Source
    def http_method_not_allowed(self, request, *args, **kwargs):
        """
        If `request.method` does not correspond to a handler method,
        determine what kind of exception to raise.
        """
        raise exceptions.MethodNotAllowed(request.method)

inference

def inference(
    self,
    request: django.http.request.HttpRequest
) -> django.http.response.HttpResponse

Performs inference on the provided model and input data.

This method takes in an HTTP request containing the necessary metadata and input data, performs any required preprocessing on the input data, runs the inference using the specified model, and returns a response in the format specified by the return_format parameter, including uncertainty measurements if an uncertainty method is configured for the model.

Parameters:

    request (HttpRequest): The current HTTP request.

Returns:

    HttpResponse: An HttpResponse containing the result of the inference as well as its uncertainty.
View Source
    @extend_schema(
        request=inline_serializer(
            "InferenceJsonSerializer",
            fields={
                "model_id": CharField(),
                "model_input": ListField(child=ListField(child=FloatField())),
                "return_format": ChoiceField(["binary", "json"])
            }
        ),
        responses={
            status.HTTP_200_OK: serializer_class,
            status.HTTP_400_BAD_REQUEST: ErrorSerializer,
        },
        examples=[
            OpenApiExample("JSON Example", value={
                "model_id": "mymodel",
                "model_input": [
                    [1.0, 2.3, -0.4, 3],
                    [0.01, 9.7, 5.6, 7]
                ],
                "return_format": "json"
            }, request_only=True),
        ]
    )
    def inference(self, request: HttpRequest) -> HttpResponse:
        """
        Performs inference on the provided model and input data.

        This method takes in an HTTP request containing the necessary metadata and input data,
        performs any required preprocessing on the input data, runs the inference using the specified model,
        and returns a response in the format specified by the `return_format` parameter including
        possible uncertainty measurements if defined.

        Args:
            request (HttpRequest): The current HTTP request.

        Returns:
            HttpResponse: A HttpResponse containing the result of the inference as well as its uncertainty.
        """
        request_body, is_json = self._get_handle_content_type(request)
        model, preprocessing, input_shape, return_format = self._get_inference_metadata(
            request_body,
            "json" if is_json else "binary"
        )
        model_input = self._get_model_input(request, request_body)

        if preprocessing:
            model_input = preprocessing(model_input)
        else:
            # if no preprocessing is defined, at least try to convert/interpret the model_input as
            # PyTorch tensor, before raising an exception
            model_input = self._try_cast_model_input_to_tensor(model_input)
        self._validate_model_input_after_preprocessing(model_input, input_shape, bool(preprocessing))

        uncertainty_cls, inference, uncertainty = self._do_inference(model, model_input)
        return self._make_response(uncertainty_cls, inference, uncertainty, return_format)
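
For illustration, a hedged client-side call against this endpoint. The URL path and the token auth scheme are assumptions; the JSON body follows the InferenceJsonSerializer schema shown above:

    import requests

    resp = requests.post(
        "http://localhost:8000/api/inference/",  # assumed mount point
        json={
            "model_id": "mymodel",
            "model_input": [[1.0, 2.3, -0.4, 3], [0.01, 9.7, 5.6, 7]],
            "return_format": "json",
        },
        headers={"Authorization": "Token <your-token>"},  # assumed auth scheme
    )
    resp.raise_for_status()
    body = resp.json()
    print(body["inference"], body.get("uncertainty"))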

initial

def initial(
    self,
    request,
    *args,
    **kwargs
)

Runs anything that needs to occur prior to calling the method handler.

View Source
    def initial(self, request, *args, **kwargs):
        """
        Runs anything that needs to occur prior to calling the method handler.
        """
        self.format_kwarg = self.get_format_suffix(**kwargs)

        # Perform content negotiation and store the accepted info on the request
        neg = self.perform_content_negotiation(request)
        request.accepted_renderer, request.accepted_media_type = neg

        # Determine the API version, if versioning is in use.
        version, scheme = self.determine_version(request, *args, **kwargs)
        request.version, request.versioning_scheme = version, scheme

        # Ensure that the incoming request is permitted
        self.perform_authentication(request)
        self.check_permissions(request)
        self.check_throttles(request)
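
Because initial() runs before every handler, it is a convenient hook for cross-cutting concerns. A sketch (hypothetical subclass) that logs each request once negotiation and versioning are resolved:

    import logging

    logger = logging.getLogger(__name__)

    class AuditedInference(Inference):  # hypothetical subclass
        def initial(self, request, *args, **kwargs):
            super().initial(request, *args, **kwargs)
            # accepted_media_type and version are populated by super().initial()
            logger.info("%s %s as %s", request.method, request.path,
                        request.accepted_media_type)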

initialize_request

def initialize_request(
    self,
    request,
    *args,
    **kwargs
)

Set the .action attribute on the view, depending on the request method.

View Source
    def initialize_request(self, request, *args, **kwargs):
        """
        Set the `.action` attribute on the view, depending on the request method.
        """
        request = super().initialize_request(request, *args, **kwargs)
        method = request.method.lower()
        if method == 'options':
            # This is a special case as we always provide handling for the
            # options method in the base `View` class.
            # Unlike the other explicitly defined actions, 'metadata' is implicit.
            self.action = 'metadata'
        else:
            self.action = self.action_map.get(method)
        return request
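
The .action attribute set here is commonly used to vary behaviour per action. A minimal sketch (hypothetical subclass):

    class GuardedInference(Inference):  # hypothetical subclass
        def get_permissions(self):
            if self.action == "metadata":  # OPTIONS requests map to 'metadata'
                return []  # allow schema introspection without permission checks
            return super().get_permissions()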

options

def options(
    self,
    request,
    *args,
    **kwargs
)

Handler method for HTTP 'OPTIONS' request.

View Source
    def options(self, request, *args, **kwargs):
        """
        Handler method for HTTP 'OPTIONS' request.
        """
        if self.metadata_class is None:
            return self.http_method_not_allowed(request, *args, **kwargs)
        data = self.metadata_class().determine_metadata(request, self)
        return Response(data, status=status.HTTP_200_OK)
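
Setting metadata_class to None therefore disables OPTIONS introspection entirely; such requests fall through to http_method_not_allowed and yield 405. A one-line sketch (hypothetical subclass):

    class NoMetadataInference(Inference):  # hypothetical subclass
        metadata_class = None  # OPTIONS now returns 405 Method Not Allowed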

perform_authentication

def perform_authentication(
    self,
    request
)

Perform authentication on the incoming request.

Note that if you override this and simply 'pass', then authentication will instead be performed lazily, the first time either request.user or request.auth is accessed.

View Source
    def perform_authentication(self, request):
        """
        Perform authentication on the incoming request.

        Note that if you override this and simply 'pass', then authentication
        will instead be performed lazily, the first time either
        `request.user` or `request.auth` is accessed.
        """
        request.user
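
The lazy variant described above looks like this (a sketch, not part of this module):

    class LazyAuthInference(Inference):  # hypothetical subclass
        def perform_authentication(self, request):
            # Deliberately do nothing: authentication now runs lazily on the
            # first access to request.user or request.auth.
            pass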

perform_content_negotiation

def perform_content_negotiation(
    self,
    request,
    force=False
)

Determine which renderer and media type to use to render the response.

View Source
    def perform_content_negotiation(self, request, force=False):
        """
        Determine which renderer and media type to use to render the response.
        """
        renderers = self.get_renderers()
        conneg = self.get_content_negotiator()

        try:
            return conneg.select_renderer(request, renderers, self.format_kwarg)
        except Exception:
            if force:
                return (renderers[0], renderers[0].media_type)
            raise
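
Negotiation selects from get_renderers(), so the simplest way to constrain it is to narrow renderer_classes. A sketch (hypothetical subclass):

    from rest_framework.renderers import JSONRenderer

    class JsonOnlyInference(Inference):  # hypothetical subclass
        renderer_classes = [JSONRenderer]  # negotiation can only select JSON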

permission_denied

def permission_denied(
    self,
    request,
    message=None,
    code=None
)

If request is not permitted, determine what kind of exception to raise.

View Source
    def permission_denied(self, request, message=None, code=None):
        """
        If request is not permitted, determine what kind of exception to raise.
        """
        if request.authenticators and not request.successful_authenticator:
            raise exceptions.NotAuthenticated()
        raise exceptions.PermissionDenied(detail=message, code=code)
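
The message and code arguments are forwarded by check_permissions from the failing permission class, so custom denial text belongs there. A sketch with a hypothetical permission class:

    from rest_framework.permissions import BasePermission

    class IsModelOwner(BasePermission):  # hypothetical permission class
        message = "Only the model owner may run inference."

        def has_permission(self, request, view):
            return getattr(request.user, "is_model_owner", False)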

raise_uncaught_exception

def raise_uncaught_exception(
    self,
    exc
)
View Source
    def raise_uncaught_exception(self, exc):
        if settings.DEBUG:
            request = self.request
            renderer_format = getattr(request.accepted_renderer, 'format')
            use_plaintext_traceback = renderer_format not in ('html', 'api', 'admin')
            request.force_plaintext_errors(use_plaintext_traceback)
        raise exc

reverse_action

def reverse_action(
    self,
    url_name,
    *args,
    **kwargs
)

Reverse the action for the given url_name.

View Source
    def reverse_action(self, url_name, *args, **kwargs):
        """
        Reverse the action for the given `url_name`.
        """
        url_name = '%s-%s' % (self.basename, url_name)
        namespace = None
        if self.request and self.request.resolver_match:
            namespace = self.request.resolver_match.namespace
        if namespace:
            url_name = namespace + ':' + url_name
        kwargs.setdefault('request', self.request)

        return reverse(url_name, *args, **kwargs)
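
Typical usage from inside a ViewSet, assuming an action registered under the view's basename (the action name and kwargs here are hypothetical):

    # Resolves '<basename>-detail', honouring the current namespace and request.
    url = self.reverse_action("detail", kwargs={"pk": 42})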

setup

def setup(
    self,
    request,
    *args,
    **kwargs
)

Initialize attributes shared by all view methods.

View Source
    def setup(self, request, *args, **kwargs):
        """Initialize attributes shared by all view methods."""
        if hasattr(self, "get") and not hasattr(self, "head"):
            self.head = self.get
        self.request = request
        self.args = args
        self.kwargs = kwargs

throttled

def throttled(
    self,
    request,
    wait
)

If request is throttled, determine what kind of exception to raise.

View Source
    def throttled(self, request, wait):
        """
        If request is throttled, determine what kind of exception to raise.
        """
        raise exceptions.Throttled(wait)
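
Overriding throttled() allows a custom error payload while keeping the wait hint. A sketch (hypothetical subclass):

    from rest_framework import exceptions

    class PoliteInference(Inference):  # hypothetical subclass
        def throttled(self, request, wait):
            raise exceptions.Throttled(wait, detail="Request limit reached; retry later.")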