whylogs_container.whylabs.container package#

Subpackages#

Submodules#

whylogs_container.whylabs.container.auth module#

class whylogs_container.whylabs.container.auth.Auth#

Bases: object

api_key_auth(api_key: str = Depends(APIKeyHeader)) None#
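
Since api_key_auth is a FastAPI dependency, it can guard routes directly. A minimal sketch (the app, route, and handler here are hypothetical):

    from fastapi import Depends, FastAPI

    from whylogs_container.whylabs.container.auth import Auth

    app = FastAPI()
    auth = Auth()

    @app.get("/health")
    def health(_: None = Depends(auth.api_key_auth)) -> dict:
        # Requests without a valid API key header are rejected before this runs.
        return {"ok": True}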

whylogs_container.whylabs.container.config module#

class whylogs_container.whylabs.container.config.ConfigActor(config_instance: ConfigInstance)#

Bases: ProcessActor[RefreshMessage, None], SynchronousConfig

close_process() None#
handle_refresh_message(messages: List[RefreshMessage]) None#
is_process_alive() bool#
process_batch(batch, batch_type) None#
process_close_message(_messages: List[CloseMessage]) None#
run() None#

Method to be run in sub-process; can be overridden in sub-class

start_process(pre_warm: bool = False) None#
class whylogs_container.whylabs.container.config.ConfigInstance(config: whylogs_container.whylabs.container.config.ConfigState)#

Bases: object

config: ConfigState#

The current configuration state. Only the ConfigActor can update it, so that concurrent access from multiple processes is handled correctly.

class whylogs_container.whylabs.container.config.ConfigManager(address=None, authkey=None, serializer='pickle', ctx=None)#

Bases: BaseManager

ConfigInstance(*args, **kwds)#
class whylogs_container.whylabs.container.config.ConfigProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True, manager_owned=False)#

Bases: NamespaceProxy

class whylogs_container.whylabs.container.config.ConfigState(local_policies: Tuple[whylogs_container.whylabs.container.policy.policy_0_0_1.Policy, ...] = <factory>, s3_policies: Tuple[whylogs_container.whylabs.container.policy.policy_0_0_1.Policy, ...] = <factory>, whylabs_policies: Tuple[whylogs_container.whylabs.container.policy.policy_0_0_1.Policy, ...] = <factory>, create_time: datetime.datetime = <factory>)#

Bases: object

create_time: datetime#
get_container_config() ContainerConfiguration | None#
get_dataset_options(dataset_id: str) DatasetOptions#
get_default_langkit_workflow() Workflow#
get_default_whylogs_schema() DatasetOptions#
get_langkit_options() Mapping[str, Workflow]#
get_langkit_score_workflow(dataset_id: str) Workflow | None#
get_langkit_workflow(dataset_id: str) Workflow#
get_policies() Mapping[str, Policy]#
get_policy(dataset_id: str) Policy | None#
get_policy_langkit_options() Mapping[str, LangkitOptions]#

Returns the langkit options derived from all of the known policy files, as though they were originally defined in the custom configuration.

local_policies: Tuple[Policy, ...]#
s3_policies: Tuple[Policy, ...]#
whylabs_policies: Tuple[Policy, ...]#
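
Reading policy state goes through these accessors. A minimal sketch, assuming a ConfigInstance named config_instance and a hypothetical dataset ID:

    state = config_instance.config  # ConfigState; only the ConfigActor updates this

    policy = state.get_policy("model-1")  # Policy | None
    workflow = (
        state.get_langkit_workflow("model-1")
        if policy is not None
        else state.get_default_langkit_workflow()
    )
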
class whylogs_container.whylabs.container.config.RefreshMessage(id: str = <factory>)#

Bases: object

id: str#
class whylogs_container.whylabs.container.config.SynchronousConfig#

Bases: ABC

abstract close_process() None#
abstract is_process_alive() bool#
refresh() None#
abstract start_process(pre_warm: bool = False) None#
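
A subclass only needs to implement the three abstract process-lifecycle methods; refresh() is provided by the base class. A minimal sketch of a hypothetical in-process implementation:

    from whylogs_container.whylabs.container.config import SynchronousConfig

    class InProcessConfig(SynchronousConfig):
        def __init__(self) -> None:
            self._alive = False

        def start_process(self, pre_warm: bool = False) -> None:
            self._alive = True  # a real implementation would spawn a process here

        def is_process_alive(self) -> bool:
            return self._alive

        def close_process(self) -> None:
            self._alive = False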

whylogs_container.whylabs.container.config_test module#

whylogs_container.whylabs.container.config_test.test_indexing_into_toolkit() None#

whylogs_container.whylabs.container.environment module#

class whylogs_container.whylabs.container.environment.EnvVarNames(value)#

Bases: Enum

This Enum class contains all the environment variable names used in the application.

Required#

  • WHYLABS_API_KEY

  • DEFAULT_WHYLABS_ORG_ID, if you’re using an old API key that doesn’t have an org ID at the end of it.

Optional#

  • DEFAULT_WHYLABS_DATASET_CADENCE

  • DEFAULT_WHYLABS_UPLOAD_CADENCE

  • DEFAULT_WHYLABS_UPLOAD_INTERVAL

  • CONTAINER_PASSWORD. Required if DISABLE_CONTAINER_PASSWORD is not set to True.

  • DISABLE_CONTAINER_PASSWORD

  • FAIL_STARTUP_WITHOUT_CONFIG

AUTO_PULL_WHYLABS_POLICY_MODEL_IDS = 'AUTO_PULL_WHYLABS_POLICY_MODEL_IDS'#

Optional. If set, the container will automatically pull policies from WhyLabs for the given model IDs.

CONFIG_SYNC_CADENCE = 'CONFIG_SYNC_CADENCE'#

Optional. The cadence at which the container will sync remote policies. Can be H (hourly), M (minute), or D (daily). Defaults to M (minute).

CONFIG_SYNC_INTERVAL = 'CONFIG_SYNC_INTERVAL'#

Optional. The interval at which the container will sync remote policies. This is the number of units of time (hours, minutes, or days) that the container will wait before syncing the remote policies. Defaults to 15.
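
The cadence picks the unit and the interval picks how many units, so the defaults (M and 15) sync every 15 minutes. A minimal sketch with hypothetical values that would sync every 2 hours:

    import os

    # Must be set before the container process reads its environment.
    os.environ["CONFIG_SYNC_CADENCE"] = "H"   # hours
    os.environ["CONFIG_SYNC_INTERVAL"] = "2"  # every 2 hours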

CONTAINER_PASSWORD = 'CONTAINER_PASSWORD'#

Password for the container. The container looks for this password in a header for each request. This is the curl format for sending the header: -H "Authorization: Bearer my_password"
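
For example, sending the password with a request from Python (the host, endpoint, and password values are hypothetical):

    import requests

    resp = requests.post(
        "http://localhost:8000/evaluate",
        headers={"Authorization": "Bearer my_password"},
        json={"prompt": "Hello", "datasetId": "model-1"},
    )
    resp.raise_for_status()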

DEFAULT_WHYLABS_DATASET_CADENCE = 'DEFAULT_WHYLABS_DATASET_CADENCE'#

Can be DAILY (default) or HOURLY. This is used whenever the cadence is not specified in the dataset schema, or if you don’t specify a dataset schema at all. It determines how data is bucketed in the container: data for each profile is pooled by the day or hour.

DEFAULT_WHYLABS_ORG_ID = 'DEFAULT_WHYLABS_ORG_ID'#

Required. Organization ID for WhyLabs.

DEFAULT_WHYLABS_UPLOAD_CADENCE = 'DEFAULT_WHYLABS_UPLOAD_CADENCE'#

Can be H (hourly, default), M (minute), or D (daily). This determines how often profiles are uploaded to WhyLabs.

DEFAULT_WHYLABS_UPLOAD_INTERVAL = 'DEFAULT_WHYLABS_UPLOAD_INTERVAL'#

Default interval for WhyLabs uploads. This determines how many units of time (hours, minutes, or days) the container will wait between uploads.

DISABLE_CONTAINER_PASSWORD = 'DISABLE_CONTAINER_PASSWORD'#

If set to True, the container will not require a password for requests, and you can omit the CONTAINER_PASSWORD environment variable.

DISABLE_TRACING = 'DISABLE_TRACING'#

Optional. If set to True, the container will not send trace data to WhyLabs.

FAIL_STARTUP_WITHOUT_CONFIG = 'FAIL_STARTUP_WITHOUT_CONFIG'#

If set to True, the container will fail to start if no custom configuration is found. This is a safeguard to make sure the container is set up correctly when you’re using Python to configure dataset schemas for whylogs.

FAIL_STARTUP_WITHOUT_POLICIES = 'FAIL_STARTUP_WITHOUT_POLICIES'#

Just like FAIL_STARTUP_WITHOUT_CONFIG, but for YAML policies instead of custom Python configuration.

LLM_CONTAINER = 'LLM_CONTAINER'#

Identifies whether the deployment is an LLM container. Defaults to False; it is set to True in the LLM image build.

LOG_LEVEL = 'LOG_LEVEL'#

Defaults to INFO. The log level for the container. Can be DEBUG, INFO, WARNING, ERROR, or CRITICAL.

MAX_REQUEST_BATCH_SIZE = 'MAX_REQUEST_BATCH_SIZE'#

Defaults to 50_000. The maximum number of requests to process per batch. Requests are placed onto a queue as they come in from the REST server process; the whylogs process reads them from that queue and handles them in bulk. The larger the batch, the longer each batch takes to handle. This is mostly transparent to callers; it starts to matter when throughput is so high that the container can’t keep up and requests pile up on the queue. In that case, requestors will end up waiting for the batches to be processed, which could potentially take minutes.

Consider lowering this number if you’re seeing a lot of request timeouts and you have high TPS (relative to the performance load-testing numbers on the whylogs container docs page).

MAX_REQUEST_BUFFER_BYTES = 'MAX_REQUEST_BUFFER_BYTES'#

Defaults to 1_000_000_000 (1GB). The size of the buffer that requests are placed on after they come in from the REST server process. Increasing this gives you more time to process requests before they start timing out. It can be a good idea to increase this number if you’re seeing request timeouts under spiky traffic patterns, or if you have spare memory on the host and nothing better to do with it.

POLICY_BASE_DIR = 'POLICY_CONFIG_BASE_DIR'#

Env var that identifies the root directory for policy configs. Defaults to /whylogs_container/whylogs_config/.

S3_CONFIG_BUCKET_NAME = 'S3_CONFIG_BUCKET_NAME'#

Optional. The name of the S3 bucket where the container looks for YAML files to keep in sync.

S3_CONFIG_PREFIX = 'S3_CONFIG_PREFIX'#

Optional. The prefix within the S3 bucket where the container looks for YAML files to keep in sync. If not defined, it looks for files at the bucket’s root.

S3_CONFIG_SYNC = 'S3_CONFIG_SYNC'#

Optional. If set to True, the container will sync YAML config files from S3 and parse them into schemas.

S3_CONFIG_SYNC_CADENCE = 'S3_CONFIG_SYNC_CADENCE'#

DEPRECATED, use CONFIG_SYNC_CADENCE instead. Optional. The cadence at which the container will sync the S3 bucket’s YAML files. Can be H (hourly), M (minute), or D (daily). Defaults to M (minute).

S3_CONFIG_SYNC_INTERVAL = 'S3_CONFIG_SYNC_INTERVAL'#

DEPRECATED, use CONFIG_SYNC_INTERVAL instead. Optional. The interval at which the container will sync the S3 bucket’s YAML files. This is the number of units of time (hours, minutes, or days) that the container will wait before syncing the S3 bucket’s files into container schemas. Defaults to 15.

S3_CONFIG_SYNC_ROLE_ARN = 'S3_CONFIG_SYNC_ROLE_ARN'#

Optional. The role ARN to assume when syncing the S3 bucket’s YAML files. If not defined, the container will look for the standard AWS credentials in the environment.

TRACE_ENDPOINT = 'TRACE_ENDPOINT'#

Optional. The endpoint to send trace data to. If not set, traces are sent to WhyLabs production.

WHYLABS_API_KEY = 'WHYLABS_API_KEY'#

Required. API key for WhyLabs.

class whylogs_container.whylabs.container.environment.EnvironmentVariables#

Bases: object

assert_llm_endpoint() None#
auth_disabled() bool#
auto_pull_whylabs_policy_model_ids: Sequence[str]#
config_sync_cadence: DatasetUploadCadenceGranularity#
config_sync_interval: int#
container_password: str | None#
default_dataset_cadence: DatasetCadence#
default_whylabs_org_id: str | None#
default_whylabs_upload_cadence: DatasetUploadCadenceGranularity#
default_whylabs_upload_interval: int#
disable_container_password: bool#
disable_tracing: bool#
fail_startup_without_config: bool#
fail_startup_without_policies: bool#
llm_container: bool#
log_level: int#
policy_base_dir: str#
s3_config_bucket_name: str#
s3_config_prefix: str#
s3_config_sync: bool#
s3_config_sync_cadence: DatasetUploadCadenceGranularity#
s3_config_sync_interval: int#
trace_endpoint: str#
whylabs_api_key: str#

whylogs_container.whylabs.container.file_read_utils module#

whylogs_container.whylabs.container.file_read_utils.load_policy_from_str(file: str) Policy#

whylogs_container.whylabs.container.lazy module#

class whylogs_container.whylabs.container.lazy.LazyInit(init: Callable[[], T])#

Bases: Generic[T]

property value: T#
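
LazyInit defers a computation until its value is first read. A minimal usage sketch (the loader below is hypothetical):

    from whylogs_container.whylabs.container.lazy import LazyInit

    def load_model() -> str:
        print("loading...")  # stands in for an expensive import or model download
        return "model"

    model = LazyInit(load_model)  # nothing runs yet
    print(model.value)  # first access invokes load_model()
    print(model.value)  # assumed cached: load_model() shouldn't run again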

whylogs_container.whylabs.container.otel module#

class whylogs_container.whylabs.container.otel.AttributePrefix(value)#

Bases: Enum

An enumeration.

DatasetId = 'whylabs.dataset_id'#
Metric = 'whylabs.secure.metrics'#
MetricLatency = 'whylabs.secure.latency'#
Policy = 'whylabs.secure.policy'#
Score = 'whylabs.secure.score'#
Tag = 'langkit.insights.tags'#
Version = 'guardrails_container.version'#
WorkflowAction = 'whylabs.secure.action'#
class whylogs_container.whylabs.container.otel.FilteringBulkProcessor(span_exporter: SpanExporter, max_queue_size: int | None = None, schedule_delay_millis: float | None = None, max_export_batch_size: int | None = None, export_timeout_millis: float | None = None)#

Bases: BatchSpanProcessor

on_end(span: ReadableSpan) None#

Called when an opentelemetry.trace.Span is ended.

This method is called synchronously on the thread that ends the span, therefore it should not block or throw an exception.

Args:

span: The opentelemetry.trace.Span that just ended.

whylogs_container.whylabs.container.otel.get_current_carrier() Dict[str, Any]#
whylogs_container.whylabs.container.otel.get_tracer() Tracer#
whylogs_container.whylabs.container.otel.init_otel(app: FastAPI)#
whylogs_container.whylabs.container.otel.trace_langkit_result(result: WorkflowResult, score_result: WorkflowResult | None, config: ConfigInstance, dataset_id: str, start_time_ns: int, end_time_ns: int, carrier: Dict[str, Any]) None#
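
A minimal sketch of combining these helpers: open a span with the container’s tracer, then capture the active context as a carrier for propagation to another process (the span name is hypothetical):

    from whylogs_container.whylabs.container.otel import get_current_carrier, get_tracer

    tracer = get_tracer()
    with tracer.start_as_current_span("evaluate-request"):
        # Serialize the active trace context so another process can continue the trace.
        carrier = get_current_carrier()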

whylogs_container.whylabs.container.policy_downloader module#

whylogs_container.whylabs.container.policy_downloader.download_policy(dataset_id: str) str | None#

whylogs_container.whylabs.container.requests module#

class whylogs_container.whylabs.container.requests.DebugLLMValidateRequest(*, prompt: str | None = None, response: str | None = None, context: InputContext | None = None, id: str | None = None, datasetId: str, timestamp: int = None, additional_data: Dict[str, str | int | float] = None, options: RunOptions | None = None, policy: str)#

Bases: LLMValidateRequest

model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'additional_data': FieldInfo(annotation=Dict[str, Union[str, int, float]], required=False, default_factory=<lambda>), 'context': FieldInfo(annotation=Union[InputContext, NoneType], required=False), 'dataset_id': FieldInfo(annotation=str, required=True, alias='datasetId', alias_priority=2), 'id': FieldInfo(annotation=Union[str, NoneType], required=False), 'options': FieldInfo(annotation=Union[RunOptions, NoneType], required=False), 'policy': FieldInfo(annotation=str, required=True), 'prompt': FieldInfo(annotation=Union[str, NoneType], required=False), 'response': FieldInfo(annotation=Union[str, NoneType], required=False), 'timestamp': FieldInfo(annotation=int, required=False, default_factory=<lambda>)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

policy: str#
class whylogs_container.whylabs.container.requests.Endpoints(value)#

Bases: Enum

An enumeration.

DebugEvaluate = '/debug/evaluate'#
Evaluate = '/evaluate'#
LogLLM = '/log/llm'#
class whylogs_container.whylabs.container.requests.LLMValidateRequest(*, prompt: str | None = None, response: str | None = None, context: InputContext | None = None, id: str | None = None, datasetId: str, timestamp: int = None, additional_data: Dict[str, str | int | float] = None, options: RunOptions | None = None)#

Bases: BaseModel

additional_data: Dict[str, str | int | float]#
context: InputContext | None#
dataset_id: str#
id: str | None#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'additional_data': FieldInfo(annotation=Dict[str, Union[str, int, float]], required=False, default_factory=<lambda>), 'context': FieldInfo(annotation=Union[InputContext, NoneType], required=False), 'dataset_id': FieldInfo(annotation=str, required=True, alias='datasetId', alias_priority=2), 'id': FieldInfo(annotation=Union[str, NoneType], required=False), 'options': FieldInfo(annotation=Union[RunOptions, NoneType], required=False), 'prompt': FieldInfo(annotation=Union[str, NoneType], required=False), 'response': FieldInfo(annotation=Union[str, NoneType], required=False), 'timestamp': FieldInfo(annotation=int, required=False, default_factory=<lambda>)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

options: RunOptions | None#
prompt: str | None#
response: str | None#
timestamp: int#
to_data_dict() DataDict#
to_dataframe() DataFrame#
to_row() Dict[str, str | InputContext]#
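
A minimal sketch of building a validation request in Python (values are hypothetical); note that the constructor uses the datasetId alias for the dataset_id field:

    from whylogs_container.whylabs.container.requests import LLMValidateRequest

    req = LLMValidateRequest(
        prompt="What is the capital of France?",
        response="Paris.",
        datasetId="model-1",
    )
    row = req.to_row()  # dict form of the request, per the signature above
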
class whylogs_container.whylabs.container.requests.LogEmbeddingRequest(*, dataset_id: str, timestamp: int, embeddings: Dict[str, List[List[float]] | List[List[int]] | List[List[str]]])#

Bases: BaseModel

datasetId: str#
embeddings: Dict[str, List[List[float]] | List[List[int]] | List[List[str]]]#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'datasetId': FieldInfo(annotation=str, required=True, alias='dataset_id', alias_priority=2), 'embeddings': FieldInfo(annotation=Dict[str, Union[List[List[float]], List[List[int]], List[List[str]]]], required=True), 'timestamp': FieldInfo(annotation=int, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

timestamp: int#
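
A minimal sketch of an embedding log request (IDs and vectors are hypothetical). Note the alias: the constructor takes dataset_id while the field is exposed as datasetId:

    from whylogs_container.whylabs.container.requests import LogEmbeddingRequest

    req = LogEmbeddingRequest(
        dataset_id="model-1",
        timestamp=1700000000000,  # assumed to be epoch milliseconds
        embeddings={"prompt": [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]},
    )
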
class whylogs_container.whylabs.container.requests.LogMultiple(*, columns: Sequence[str], data: Sequence[Sequence[str | int | float | bool | List[float] | List[int] | List[str] | None]])#

Bases: BaseModel

columns: Sequence[str]#
data: Sequence[Sequence[str | int | float | bool | List[float] | List[int] | List[str] | None]]#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'columns': FieldInfo(annotation=Sequence[str], required=True), 'data': FieldInfo(annotation=Sequence[Sequence[Union[str, int, float, bool, List[float], List[int], List[str], NoneType]]], required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

class whylogs_container.whylabs.container.requests.LogRequest(*, datasetId: str, multiple: LogMultiple, timestamp: int | None = None)#

Bases: BaseModel

dataset_id: str#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'dataset_id': FieldInfo(annotation=str, required=True, alias='datasetId', alias_priority=2), 'multiple': FieldInfo(annotation=LogMultiple, required=True), 'timestamp': FieldInfo(annotation=Union[int, NoneType], required=False)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

multiple: LogMultiple#
timestamp: int | None#
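
LogRequest carries tabular data in the columns/data shape defined by LogMultiple. A minimal sketch (column names and values are hypothetical):

    from whylogs_container.whylabs.container.requests import LogMultiple, LogRequest

    req = LogRequest(
        datasetId="model-1",
        multiple=LogMultiple(
            columns=["temperature", "city"],
            data=[[21.5, "Paris"], [18.0, "London"]],
        ),
        # timestamp is optional; omitting it presumably lets the container default it.
    )
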
class whylogs_container.whylabs.container.requests.PubSubMessage(*, attributes: Dict[str, str], data: str, message_id: str, publish_time: str)#

Bases: BaseModel

attributes: Dict[str, str]#
data: str#
messageId: str#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'attributes': FieldInfo(annotation=Dict[str, str], required=True), 'data': FieldInfo(annotation=str, required=True), 'messageId': FieldInfo(annotation=str, required=True, alias='message_id', alias_priority=2), 'publishTime': FieldInfo(annotation=str, required=True, alias='publish_time', alias_priority=2)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

publishTime: str#
class whylogs_container.whylabs.container.requests.PubSubRequest(*, subscription: str, message: PubSubMessage)#

Bases: BaseModel

message: PubSubMessage#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'message': FieldInfo(annotation=PubSubMessage, required=True), 'subscription': FieldInfo(annotation=str, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

subscription: str#
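
These models mirror a Pub/Sub push envelope, where (as in Google Cloud Pub/Sub push delivery) the payload typically arrives base64-encoded in message.data. A minimal sketch with hypothetical values:

    import base64
    import json

    from whylogs_container.whylabs.container.requests import PubSubMessage, PubSubRequest

    data = base64.b64encode(json.dumps({"prompt": "Hi", "datasetId": "model-1"}).encode()).decode()
    req = PubSubRequest(
        subscription="projects/my-project/subscriptions/my-sub",
        message=PubSubMessage(
            attributes={},
            data=data,
            message_id="1234567890",
            publish_time="2024-01-01T00:00:00.000Z",
        ),
    )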

whylogs_container.whylabs.container.responses module#

class whylogs_container.whylabs.container.responses.AvailableMetrics(*, metrics_names: List[str])#

Bases: BaseModel

metrics_names: List[str]#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'metrics_names': FieldInfo(annotation=List[str], required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

class whylogs_container.whylabs.container.responses.EvaluationResult(*, metrics: List[Dict[str, Any]], validation_results: ValidationResult, perf_info: RunPerf | None, score_perf_info: RunPerf | None, action: PassAction | BlockAction, scores: List[Dict[str, Any]] = [])#

Bases: BaseModel

action: PassAction | BlockAction#
static from_results(result: WorkflowResult, score_result: WorkflowResult | None, config: ConfigInstance, request: LLMValidateRequest, perf_info: bool = False) EvaluationResult#
metrics: List[Dict[str, Any]]#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'action': FieldInfo(annotation=Union[PassAction, BlockAction], required=True, discriminator='action_type'), 'metrics': FieldInfo(annotation=List[Dict[str, Any]], required=True), 'perf_info': FieldInfo(annotation=Union[RunPerf, NoneType], required=True), 'score_perf_info': FieldInfo(annotation=Union[RunPerf, NoneType], required=True), 'scores': FieldInfo(annotation=List[Dict[str, Any]], required=False, default=[]), 'validation_results': FieldInfo(annotation=ValidationResult, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

perf_info: RunPerf | None#
score_perf_info: RunPerf | None#
scores: List[Dict[str, Any]]#
validation_results: ValidationResult#
class whylogs_container.whylabs.container.responses.LoggerStatusResponse(*, dataset_timestamps: int, dataset_profiles: int, segment_caches: int, writers: int, pending_writables: int, pending_views: List[str], views: List[str])#

Bases: BaseModel

dataset_profiles: int#
dataset_timestamps: int#
model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'dataset_profiles': FieldInfo(annotation=int, required=True), 'dataset_timestamps': FieldInfo(annotation=int, required=True), 'pending_views': FieldInfo(annotation=List[str], required=True), 'pending_writables': FieldInfo(annotation=int, required=True), 'segment_caches': FieldInfo(annotation=int, required=True), 'views': FieldInfo(annotation=List[str], required=True), 'writers': FieldInfo(annotation=int, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

pending_views: List[str]#

Pending views are ones that have already been staged for writing. This happens whenever the rolling logger hits the time interval for uploading its internal state. If these fail to upload, they’ll remain pending until the next attempt.

pending_writables: int#
segment_caches: int#
views: List[str]#

Views are still being added to with new log messages; there haven’t been any attempts to write these yet.

writers: int#
class whylogs_container.whylabs.container.responses.ProcessLoggerStatusResponse(*, version: str, statuses: Dict[str, LoggerStatusResponse])#

Bases: BaseModel

model_computed_fields: ClassVar[dict[str, ComputedFieldInfo]] = {}#

A dictionary of computed field names and their corresponding ComputedFieldInfo objects.

model_config: ClassVar[ConfigDict] = {}#

Configuration for the model, should be a dictionary conforming to [ConfigDict][pydantic.config.ConfigDict].

model_fields: ClassVar[dict[str, FieldInfo]] = {'statuses': FieldInfo(annotation=Dict[str, LoggerStatusResponse], required=True), 'version': FieldInfo(annotation=str, required=True)}#

Metadata about the fields defined on the model, mapping of field names to [FieldInfo][pydantic.fields.FieldInfo].

This replaces Model.__fields__ from Pydantic V1.

statuses: Dict[str, LoggerStatusResponse]#
version: str#

whylogs_container.whylabs.container.routes module#

whylogs_container.whylabs.container.routes.init_routes(whylogs_logger: ContainerProcessRollingLogger, validator: ValidationActor, config_actor: SynchronousConfig, config_instance: ConfigInstance) None#

whylogs_container.whylabs.container.s3_sync module#

class whylogs_container.whylabs.container.s3_sync.S3Sync(region_name: str | None = None)#

Bases: object

s3_llm_validators: Mapping[str, Workflow]#
sync_s3_policies() List[Policy]#
whylogs_container.whylabs.container.s3_sync.get_s3_client(role_arn: str | None = None) Any#

whylogs_container.whylabs.container.startup module#

whylogs_container.whylabs.container.startup.init_logging() None#
whylogs_container.whylabs.container.startup.start(port=8000)#

whylogs_container.whylabs.container.startup_util module#

whylogs_container.whylabs.container.startup_util.fake_llm_deps()#

Hack to make sure that the non-LLM container doesn’t die because torch is missing.

whylogs_container.whylabs.container.version module#

Module contents#