Skip to content

Sandbox Client

Simplified client for testing healthcare services with various data sources.

This class provides an intuitive interface for: - Loading test datasets (MIMIC-on-FHIR, Synthea, CSV) - Generating synthetic FHIR data - Sending requests to healthcare services - Managing request/response lifecycle

Examples:

Load from dataset registry:

>>> client = SandboxClient(
...     api_url="http://localhost:8000",
...     endpoint="/cds/cds-services/my-service"
... )
>>> client.load_from_registry("mimic-on-fhir", sample_size=10)
>>> responses = client.send_requests()

Load CDA file from path:

>>> client = SandboxClient(
...     api_url="http://localhost:8000",
...     endpoint="/notereader/fhir/",
...     protocol="soap"
... )
>>> client.load_from_path("./data/clinical_note.xml")
>>> responses = client.send_requests()

Generate data from free text:

>>> client = SandboxClient(
...     api_url="http://localhost:8000",
...     endpoint="/cds/cds-services/discharge-summarizer"
... )
>>> client.load_free_text(
...     csv_path="./data/notes.csv",
...     column_name="text",
...     workflow="encounter-discharge"
... )
>>> responses = client.send_requests()
Source code in healthchain/sandbox/sandboxclient.py
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
class SandboxClient:
    """
    Simplified client for testing healthcare services with various data sources.

    This class provides an intuitive interface for:
    - Loading test datasets (MIMIC-on-FHIR, Synthea, CSV)
    - Generating synthetic FHIR data
    - Sending requests to healthcare services
    - Managing request/response lifecycle

    Examples:
        Load from dataset registry:
        >>> client = SandboxClient(
        ...     api_url="http://localhost:8000",
        ...     endpoint="/cds/cds-services/my-service"
        ... )
        >>> client.load_from_registry("mimic-on-fhir", sample_size=10)
        >>> responses = client.send_requests()

        Load CDA file from path:
        >>> client = SandboxClient(
        ...     api_url="http://localhost:8000",
        ...     endpoint="/notereader/fhir/",
        ...     protocol="soap"
        ... )
        >>> client.load_from_path("./data/clinical_note.xml")
        >>> responses = client.send_requests()

        Generate data from free text:
        >>> client = SandboxClient(
        ...     api_url="http://localhost:8000",
        ...     endpoint="/cds/cds-services/discharge-summarizer"
        ... )
        >>> client.load_free_text(
        ...     csv_path="./data/notes.csv",
        ...     column_name="text",
        ...     workflow="encounter-discharge"
        ... )
        >>> responses = client.send_requests()
    """

    def __init__(
        self,
        api_url: str,
        endpoint: str,
        workflow: Optional[Union[Workflow, str]] = None,
        protocol: Literal["rest", "soap"] = "rest",
        timeout: float = 10.0,
    ):
        """
        Initialize SandboxClient.

        Args:
            api_url: Base URL of the service (e.g., "http://localhost:8000")
            endpoint: Service endpoint path (e.g., "/cds/cds-services/my-service")
            workflow: Optional workflow specification (auto-detected if not provided)
            protocol: Communication protocol - "rest" for CDS Hooks, "soap" for CDA
            timeout: Request timeout in seconds

        Raises:
            ValueError: If api_url is invalid
        """
        try:
            self.api = httpx.URL(api_url)
        except Exception as e:
            # Chain the original parse error for easier debugging
            raise ValueError(f"Invalid API URL: {str(e)}") from e

        self.endpoint = endpoint
        self.workflow = Workflow(workflow) if isinstance(workflow, str) else workflow
        self.protocol = ApiProtocol.soap if protocol == "soap" else ApiProtocol.rest
        self.timeout = timeout

        # Request/response management
        self.request_data: List[Union[CDSRequest, Any]] = []
        self.responses: List[Dict] = []
        self.sandbox_id = uuid.uuid4()

        log.info(
            f"Initialized SandboxClient {self.sandbox_id} for {self.api}{self.endpoint}"
        )

    def load_from_registry(
        self,
        source: str,
        **kwargs: Any,
    ) -> "SandboxClient":
        """
        Load data from the dataset registry.

        Loads pre-configured datasets like MIMIC-on-FHIR, Synthea, or custom
        registered datasets.

        Args:
            source: Dataset name (e.g., "mimic-on-fhir", "synthea")
            **kwargs: Dataset-specific parameters (e.g., sample_size, num_patients)

        Returns:
            Self for method chaining

        Raises:
            ValueError: If dataset not found in registry

        Examples:
            Discover available datasets:
            >>> from healthchain.sandbox import list_available_datasets
            >>> print(list_available_datasets())

            Load MIMIC dataset:
            >>> client.load_from_registry("mimic-on-fhir", sample_size=10)
        """
        from healthchain.sandbox.datasets import DatasetRegistry

        log.info(f"Loading dataset from registry: {source}")
        try:
            loaded_data = DatasetRegistry.load(source, **kwargs)
            self._construct_request(loaded_data)
            log.info(f"Loaded {source} dataset with {len(self.request_data)} requests")
        except KeyError as e:
            raise ValueError(
                f"Unknown dataset: {source}. "
                f"Available datasets: {DatasetRegistry.list_datasets()}"
            ) from e
        return self

    def load_from_path(
        self,
        path: Union[str, Path],
        pattern: Optional[str] = None,
        workflow: Optional[Union[Workflow, str]] = None,
    ) -> "SandboxClient":
        """
        Load data from file system path.

        Supports loading single files or directories. File type is auto-detected
        from extension and protocol:
        - .xml files with SOAP protocol → CDA documents
        - .json files with REST protocol → Pre-formatted Prefetch data

        Args:
            path: File path or directory path
            pattern: Glob pattern for filtering files in directory (e.g., "*.xml")
            workflow: Optional workflow override (auto-detected from protocol if not provided)

        Returns:
            Self for method chaining

        Raises:
            FileNotFoundError: If path doesn't exist
            ValueError: If no matching files found, unsupported file type, or a
                required workflow is missing for JSON Prefetch data

        Examples:
            Load single CDA file:
            >>> client.load_from_path("./data/clinical_note.xml")

            Load directory of CDA files:
            >>> client.load_from_path("./data/cda_files/", pattern="*.xml")

            Load with explicit workflow:
            >>> client.load_from_path("./data/note.xml", workflow="sign-note-inpatient")
        """
        path = Path(path)
        if not path.exists():
            raise FileNotFoundError(f"Path not found: {path}")

        # Normalize workflow once, instead of per file inside the loop
        if isinstance(workflow, str):
            workflow = Workflow(workflow)

        # Collect files to process
        files_to_load = []
        if path.is_file():
            files_to_load = [path]
        elif path.is_dir():
            pattern = pattern or "*"
            # Filter to regular files: a glob like "*" can match
            # subdirectories, which are not loadable documents
            files_to_load = [p for p in path.glob(pattern) if p.is_file()]
            if not files_to_load:
                raise ValueError(
                    f"No files found matching pattern '{pattern}' in {path}"
                )
        else:
            raise ValueError(f"Path must be a file or directory: {path}")

        log.info(f"Loading {len(files_to_load)} file(s) from {path}")

        # Process each file
        for file_path in files_to_load:
            # Determine file type from extension
            extension = file_path.suffix.lower()

            if extension == ".xml":
                with open(file_path, "r") as f:
                    xml_content = f.read()
                # CDA documents default to sign-note-inpatient when no
                # workflow is given anywhere
                workflow_enum = workflow or self.workflow or Workflow.sign_note_inpatient
                self._construct_request(xml_content, workflow_enum)
                log.info(f"Loaded CDA document from {file_path.name}")

            elif extension == ".json":
                with open(file_path, "r") as f:
                    json_data = json.load(f)

                # Resolve the workflow BEFORE parsing, so a missing workflow
                # surfaces as its own error instead of being swallowed by the
                # Prefetch-parsing handler and reported as a format problem
                workflow_enum = workflow or self.workflow
                if not workflow_enum:
                    raise ValueError(
                        "Workflow must be specified when loading JSON Prefetch data. "
                        "Provide via 'workflow' parameter or set on client initialization."
                    )

                try:
                    # Validate and load as Prefetch object
                    prefetch_data = Prefetch(**json_data)
                except Exception as e:
                    log.error(f"Failed to parse {file_path} as Prefetch: {e}")
                    raise ValueError(
                        f"File {file_path} is not valid Prefetch format. "
                        f"Expected JSON with 'prefetch' key containing FHIR resources. "
                        f"Error: {e}"
                    ) from e

                self._construct_request(prefetch_data, workflow_enum)
                log.info(f"Loaded Prefetch data from {file_path.name}")
            else:
                log.warning(f"Skipping unsupported file type: {file_path}")

        log.info(
            f"Loaded {len(self.request_data)} requests from {len(files_to_load)} file(s)"
        )
        return self

    def load_free_text(
        self,
        csv_path: str,
        column_name: str,
        workflow: Union[Workflow, str],
        random_seed: Optional[int] = None,
        **kwargs: Any,
    ) -> "SandboxClient":
        """
        Generates a CDS prefetch from free text notes.

        Reads clinical notes from a CSV file and wraps them in FHIR DocumentReferences
        in a CDS prefetch field for CDS Hooks workflows. Generates additional synthetic
        FHIR resources as needed based on the specified workflow.

        Args:
            csv_path: Path to CSV file containing clinical notes
            column_name: Name of the column containing the text
            workflow: CDS workflow type (e.g., "encounter-discharge", "patient-view")
            random_seed: Seed for reproducible data generation
            **kwargs: Additional parameters for data generation

        Returns:
            Self for method chaining

        Raises:
            FileNotFoundError: If CSV file doesn't exist
            ValueError: If workflow is invalid or column not found

        Examples:
            Generate discharge summaries:
            >>> client.load_free_text(
            ...     csv_path="./data/discharge_notes.csv",
            ...     column_name="text",
            ...     workflow="encounter-discharge",
            ...     random_seed=42
            ... )
        """
        from .generators import CdsDataGenerator

        workflow_enum = Workflow(workflow) if isinstance(workflow, str) else workflow

        generator = CdsDataGenerator()
        generator.set_workflow(workflow_enum)

        prefetch_data = generator.generate_prefetch(
            random_seed=random_seed,
            free_text_path=csv_path,
            column_name=column_name,
            **kwargs,
        )

        self._construct_request(prefetch_data, workflow_enum)
        log.info(
            f"Generated {len(self.request_data)} requests from free text for workflow {workflow_enum.value}"
        )

        return self

    def _construct_request(
        self, data: Union[Prefetch, Any], workflow: Optional[Workflow] = None
    ) -> None:
        """
        Convert data to request format and add to queue.

        Args:
            data: Data to convert (Prefetch for CDS, CDA document string for SOAP)
            workflow: Workflow to use for request construction (falls back to
                the client-level workflow; SOAP additionally defaults to
                sign-note-inpatient)

        Raises:
            ValueError: If no workflow is available for a REST request, or the
                protocol is unsupported
        """
        workflow = workflow or self.workflow

        if self.protocol == ApiProtocol.rest:
            if not workflow:
                raise ValueError(
                    "Workflow must be specified for REST/CDS Hooks requests"
                )
            constructor = CdsRequestConstructor()
            request = constructor.construct_request(data, workflow)
        elif self.protocol == ApiProtocol.soap:
            constructor = ClinDocRequestConstructor()
            request = constructor.construct_request(
                data, workflow or Workflow.sign_note_inpatient
            )
        else:
            raise ValueError(f"Unsupported protocol: {self.protocol}")

        self.request_data.append(request)

    def send_requests(self) -> List[Dict]:
        """
        Send all queued requests to the service.

        Failed requests (HTTP errors, timeouts, connection errors) are logged
        and recorded as empty dicts so response indices still line up with
        request indices.

        Returns:
            List of response dictionaries

        Raises:
            RuntimeError: If no requests are queued
        """
        if not self.request_data:
            raise RuntimeError(
                "No requests to send. Load data first using load_from_registry(), load_from_path(), or load_free_text()"
            )

        url = self.api.join(self.endpoint)
        log.info(f"Sending {len(self.request_data)} requests to {url}")

        with httpx.Client(follow_redirects=True) as client:
            responses: List[Dict] = []
            # No read timeout: service-side processing (e.g. model inference)
            # may legitimately exceed the connect timeout
            timeout = httpx.Timeout(self.timeout, read=None)

            for request in self.request_data:
                try:
                    if self.protocol == ApiProtocol.soap:
                        headers = {"Content-Type": "text/xml; charset=utf-8"}
                        response = client.post(
                            url=str(url),
                            data=request.document,
                            headers=headers,
                            timeout=timeout,
                        )
                        response.raise_for_status()
                        response_model = CdaResponse(document=response.text)
                        responses.append(response_model.model_dump_xml())
                    else:
                        # REST/CDS Hooks
                        log.debug(f"Making POST request to: {url}")
                        response = client.post(
                            url=str(url),
                            json=request.model_dump(exclude_none=True),
                            timeout=timeout,
                        )
                        response.raise_for_status()
                        response_data = response.json()
                        try:
                            cds_response = CDSResponse(**response_data)
                            responses.append(cds_response.model_dump(exclude_none=True))
                        except Exception:
                            # Fallback to raw response if parsing fails
                            responses.append(response_data)

                except httpx.HTTPStatusError as exc:
                    try:
                        error_content = exc.response.json()
                    except Exception:
                        error_content = exc.response.text
                    log.error(
                        f"Error response {exc.response.status_code} while requesting "
                        f"{exc.request.url!r}: {error_content}"
                    )
                    responses.append({})
                except httpx.TimeoutException as exc:
                    log.error(f"Request to {exc.request.url!r} timed out!")
                    responses.append({})
                except httpx.RequestError as exc:
                    log.error(
                        f"An error occurred while requesting {exc.request.url!r}."
                    )
                    responses.append({})

        self.responses = responses
        log.info(f"Received {len(responses)} responses")

        return responses

    def save_results(self, directory: Union[str, Path] = "./output/") -> None:
        """
        Save request and response data to disk.

        Requests go to <directory>/requests/ and responses to
        <directory>/responses/, as XML for SOAP and JSON for REST.

        Args:
            directory: Directory to save data to (default: "./output/")

        Raises:
            RuntimeError: If no responses are available to save
        """
        if not self.responses:
            raise RuntimeError(
                "No responses to save. Send requests first using send_requests()"
            )

        save_dir = Path(directory)
        request_path = ensure_directory_exists(save_dir / "requests")

        # Determine file extension based on protocol
        extension = "xml" if self.protocol == ApiProtocol.soap else "json"

        # Save requests
        if self.protocol == ApiProtocol.soap:
            request_data = [request.model_dump_xml() for request in self.request_data]
        else:
            request_data = [
                request.model_dump(exclude_none=True) for request in self.request_data
            ]

        save_data_to_directory(
            request_data,
            "request",
            self.sandbox_id,
            request_path,
            extension,
        )
        log.info(f"Saved request data at {request_path}/")

        # Save responses
        response_path = ensure_directory_exists(save_dir / "responses")
        save_data_to_directory(
            self.responses,
            "response",
            self.sandbox_id,
            response_path,
            extension,
        )
        log.info(f"Saved response data at {response_path}/")

    def get_status(self) -> Dict[str, Any]:
        """
        Get current client status and statistics.

        Returns:
            Dictionary containing client status information
        """
        return {
            "sandbox_id": str(self.sandbox_id),
            "api_url": str(self.api),
            "endpoint": self.endpoint,
            "protocol": self.protocol.value
            if hasattr(self.protocol, "value")
            else str(self.protocol),
            "workflow": self.workflow.value if self.workflow else None,
            "requests_queued": len(self.request_data),
            "responses_received": len(self.responses),
        }

    def __repr__(self) -> str:
        """String representation of SandboxClient."""
        return (
            f"SandboxClient(api_url='{self.api}', endpoint='{self.endpoint}', "
            f"protocol='{self.protocol.value if hasattr(self.protocol, 'value') else self.protocol}', "
            f"requests={len(self.request_data)})"
        )

__init__(api_url, endpoint, workflow=None, protocol='rest', timeout=10.0)

Initialize SandboxClient.

PARAMETER DESCRIPTION
api_url

Base URL of the service (e.g., "http://localhost:8000")

TYPE: str

endpoint

Service endpoint path (e.g., "/cds/cds-services/my-service")

TYPE: str

workflow

Optional workflow specification (auto-detected if not provided)

TYPE: Optional[Union[Workflow, str]] DEFAULT: None

protocol

Communication protocol - "rest" for CDS Hooks, "soap" for CDA

TYPE: Literal['rest', 'soap'] DEFAULT: 'rest'

timeout

Request timeout in seconds

TYPE: float DEFAULT: 10.0

RAISES DESCRIPTION
ValueError

If api_url or endpoint is invalid

Source code in healthchain/sandbox/sandboxclient.py
def __init__(
    self,
    api_url: str,
    endpoint: str,
    workflow: Optional[Union[Workflow, str]] = None,
    protocol: Literal["rest", "soap"] = "rest",
    timeout: float = 10.0,
):
    """
    Initialize SandboxClient.

    Args:
        api_url: Base URL of the service (e.g., "http://localhost:8000")
        endpoint: Service endpoint path (e.g., "/cds/cds-services/my-service")
        workflow: Optional workflow specification (auto-detected if not provided)
        protocol: Communication protocol - "rest" for CDS Hooks, "soap" for CDA
        timeout: Request timeout in seconds

    Raises:
        ValueError: If api_url or endpoint is invalid
    """
    try:
        self.api = httpx.URL(api_url)
    except Exception as e:
        raise ValueError(f"Invalid API URL: {str(e)}")

    self.endpoint = endpoint
    self.workflow = Workflow(workflow) if isinstance(workflow, str) else workflow
    self.protocol = ApiProtocol.soap if protocol == "soap" else ApiProtocol.rest
    self.timeout = timeout

    # Request/response management
    self.request_data: List[Union[CDSRequest, Any]] = []
    self.responses: List[Dict] = []
    self.sandbox_id = uuid.uuid4()

    log.info(
        f"Initialized SandboxClient {self.sandbox_id} for {self.api}{self.endpoint}"
    )

__repr__()

String representation of SandboxClient.

Source code in healthchain/sandbox/sandboxclient.py
def __repr__(self) -> str:
    """String representation of SandboxClient."""
    return (
        f"SandboxClient(api_url='{self.api}', endpoint='{self.endpoint}', "
        f"protocol='{self.protocol.value if hasattr(self.protocol, 'value') else self.protocol}', "
        f"requests={len(self.request_data)})"
    )

get_status()

Get current client status and statistics.

RETURNS DESCRIPTION
Dict[str, Any]

Dictionary containing client status information

Source code in healthchain/sandbox/sandboxclient.py
def get_status(self) -> Dict[str, Any]:
    """
    Get current client status and statistics.

    Returns:
        Dictionary containing client status information
    """
    return {
        "sandbox_id": str(self.sandbox_id),
        "api_url": str(self.api),
        "endpoint": self.endpoint,
        "protocol": self.protocol.value
        if hasattr(self.protocol, "value")
        else str(self.protocol),
        "workflow": self.workflow.value if self.workflow else None,
        "requests_queued": len(self.request_data),
        "responses_received": len(self.responses),
    }

load_free_text(csv_path, column_name, workflow, random_seed=None, **kwargs)

Generates a CDS prefetch from free text notes.

Reads clinical notes from a CSV file and wraps them in FHIR DocumentReferences in a CDS prefetch field for CDS Hooks workflows. Generates additional synthetic FHIR resources as needed based on the specified workflow.

PARAMETER DESCRIPTION
csv_path

Path to CSV file containing clinical notes

TYPE: str

column_name

Name of the column containing the text

TYPE: str

workflow

CDS workflow type (e.g., "encounter-discharge", "patient-view")

TYPE: Union[Workflow, str]

random_seed

Seed for reproducible data generation

TYPE: Optional[int] DEFAULT: None

**kwargs

Additional parameters for data generation

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
SandboxClient

Self for method chaining

RAISES DESCRIPTION
FileNotFoundError

If CSV file doesn't exist

ValueError

If workflow is invalid or column not found

Examples:

Generate discharge summaries:

>>> client.load_free_text(
...     csv_path="./data/discharge_notes.csv",
...     column_name="text",
...     workflow="encounter-discharge",
...     random_seed=42
... )
Source code in healthchain/sandbox/sandboxclient.py
def load_free_text(
    self,
    csv_path: str,
    column_name: str,
    workflow: Union[Workflow, str],
    random_seed: Optional[int] = None,
    **kwargs: Any,
) -> "SandboxClient":
    """
    Generates a CDS prefetch from free text notes.

    Reads clinical notes from a CSV file and wraps it in FHIR DocumentReferences
    in a CDS prefetch field for CDS Hooks workflows. Generates additional synthetic
    FHIR resources as needed based on the specified workflow.

    Args:
        csv_path: Path to CSV file containing clinical notes
        column_name: Name of the column containing the text
        workflow: CDS workflow type (e.g., "encounter-discharge", "patient-view")
        random_seed: Seed for reproducible data generation
        **kwargs: Additional parameters for data generation

    Returns:
        Self for method chaining

    Raises:
        FileNotFoundError: If CSV file doesn't exist
        ValueError: If workflow is invalid or column not found

    Examples:
        Generate discharge summaries:
        >>> client.load_free_text(
        ...     csv_path="./data/discharge_notes.csv",
        ...     column_name="text",
        ...     workflow="encounter-discharge",
        ...     random_seed=42
        ... )
    """
    from .generators import CdsDataGenerator

    workflow_enum = Workflow(workflow) if isinstance(workflow, str) else workflow

    generator = CdsDataGenerator()
    generator.set_workflow(workflow_enum)

    prefetch_data = generator.generate_prefetch(
        random_seed=random_seed,
        free_text_path=csv_path,
        column_name=column_name,
        **kwargs,
    )

    self._construct_request(prefetch_data, workflow_enum)
    log.info(
        f"Generated {len(self.request_data)} requests from free text for workflow {workflow_enum.value}"
    )

    return self

load_from_path(path, pattern=None, workflow=None)

Load data from file system path.

Supports loading single files or directories. File type is auto-detected from extension and protocol: - .xml files with SOAP protocol → CDA documents - .json files with REST protocol → Pre-formatted Prefetch data

PARAMETER DESCRIPTION
path

File path or directory path

TYPE: Union[str, Path]

pattern

Glob pattern for filtering files in directory (e.g., "*.xml")

TYPE: Optional[str] DEFAULT: None

workflow

Optional workflow override (auto-detected from protocol if not provided)

TYPE: Optional[Union[Workflow, str]] DEFAULT: None

RETURNS DESCRIPTION
SandboxClient

Self for method chaining

RAISES DESCRIPTION
FileNotFoundError

If path doesn't exist

ValueError

If no matching files found or unsupported file type

Examples:

Load single CDA file:

>>> client.load_from_path("./data/clinical_note.xml")

Load directory of CDA files:

>>> client.load_from_path("./data/cda_files/", pattern="*.xml")

Load with explicit workflow:

>>> client.load_from_path("./data/note.xml", workflow="sign-note-inpatient")
Source code in healthchain/sandbox/sandboxclient.py
def load_from_path(
    self,
    path: Union[str, Path],
    pattern: Optional[str] = None,
    workflow: Optional[Union[Workflow, str]] = None,
) -> "SandboxClient":
    """
    Load data from file system path.

    Supports loading single files or directories. File type is auto-detected
    from extension and protocol:
    - .xml files with SOAP protocol → CDA documents
    - .json files with REST protocol → Pre-formatted Prefetch data

    Args:
        path: File path or directory path
        pattern: Glob pattern for filtering files in directory (e.g., "*.xml")
        workflow: Optional workflow override (auto-detected from protocol if not provided)

    Returns:
        Self for method chaining

    Raises:
        FileNotFoundError: If path doesn't exist
        ValueError: If no matching files found or unsupported file type

    Examples:
        Load single CDA file:
        >>> client.load_from_path("./data/clinical_note.xml")

        Load directory of CDA files:
        >>> client.load_from_path("./data/cda_files/", pattern="*.xml")

        Load with explicit workflow:
        >>> client.load_from_path("./data/note.xml", workflow="sign-note-inpatient")
    """
    path = Path(path)
    if not path.exists():
        raise FileNotFoundError(f"Path not found: {path}")

    # Collect files to process
    files_to_load = []
    if path.is_file():
        files_to_load = [path]
    elif path.is_dir():
        pattern = pattern or "*"
        files_to_load = list(path.glob(pattern))
        if not files_to_load:
            raise ValueError(
                f"No files found matching pattern '{pattern}' in {path}"
            )
    else:
        raise ValueError(f"Path must be a file or directory: {path}")

    log.info(f"Loading {len(files_to_load)} file(s) from {path}")

    # Process each file
    for file_path in files_to_load:
        # Determine file type from extension
        extension = file_path.suffix.lower()

        if extension == ".xml":
            with open(file_path, "r") as f:
                xml_content = f.read()
            workflow_enum = (
                Workflow(workflow)
                if isinstance(workflow, str)
                else workflow or self.workflow or Workflow.sign_note_inpatient
            )
            self._construct_request(xml_content, workflow_enum)
            log.info(f"Loaded CDA document from {file_path.name}")

        elif extension == ".json":
            with open(file_path, "r") as f:
                json_data = json.load(f)

            try:
                # Validate and load as Prefetch object
                prefetch_data = Prefetch(**json_data)

                workflow_enum = (
                    Workflow(workflow)
                    if isinstance(workflow, str)
                    else workflow or self.workflow
                )
                if not workflow_enum:
                    raise ValueError(
                        "Workflow must be specified when loading JSON Prefetch data. "
                        "Provide via 'workflow' parameter or set on client initialization."
                    )
                self._construct_request(prefetch_data, workflow_enum)
                log.info(f"Loaded Prefetch data from {file_path.name}")

            except Exception as e:
                log.error(f"Failed to parse {file_path} as Prefetch: {e}")
                raise ValueError(
                    f"File {file_path} is not valid Prefetch format. "
                    f"Expected JSON with 'prefetch' key containing FHIR resources. "
                    f"Error: {e}"
                )
        else:
            log.warning(f"Skipping unsupported file type: {file_path}")

    log.info(
        f"Loaded {len(self.request_data)} requests from {len(files_to_load)} file(s)"
    )
    return self

load_from_registry(source, **kwargs)

Load data from the dataset registry.

Loads pre-configured datasets like MIMIC-on-FHIR, Synthea, or custom registered datasets.

PARAMETER DESCRIPTION
source

Dataset name (e.g., "mimic-on-fhir", "synthea")

TYPE: str

**kwargs

Dataset-specific parameters (e.g., sample_size, num_patients)

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
SandboxClient

Self for method chaining

RAISES DESCRIPTION
ValueError

If dataset not found in registry

Examples:

Discover available datasets:

>>> from healthchain.sandbox import list_available_datasets
>>> print(list_available_datasets())

Load MIMIC dataset:

>>> client.load_from_registry("mimic-on-fhir", sample_size=10)
Source code in healthchain/sandbox/sandboxclient.py
def load_from_registry(
    self,
    source: str,
    **kwargs: Any,
) -> "SandboxClient":
    """
    Load test data from a named dataset in the dataset registry.

    Supports pre-configured datasets such as MIMIC-on-FHIR and Synthea,
    as well as any custom datasets registered by the user.

    Args:
        source: Dataset name (e.g., "mimic-on-fhir", "synthea")
        **kwargs: Dataset-specific parameters (e.g., sample_size, num_patients)

    Returns:
        Self for method chaining

    Raises:
        ValueError: If dataset not found in registry

    Examples:
        Discover available datasets:
        >>> from healthchain.sandbox import list_available_datasets
        >>> print(list_available_datasets())

        Load MIMIC dataset:
        >>> client.load_from_registry("mimic-on-fhir", sample_size=10)
    """
    # Lazy import — presumably avoids an import cycle at module load time
    from healthchain.sandbox.datasets import DatasetRegistry

    log.info(f"Loading dataset from registry: {source}")
    try:
        dataset = DatasetRegistry.load(source, **kwargs)
        self._construct_request(dataset)
        log.info(f"Loaded {source} dataset with {len(self.request_data)} requests")
    except KeyError:
        raise ValueError(
            f"Unknown dataset: {source}. "
            f"Available datasets: {DatasetRegistry.list_datasets()}"
        )
    return self

save_results(directory='./output/')

Save request and response data to disk.

PARAMETER DESCRIPTION
directory

Directory to save data to (default: "./output/")

TYPE: Union[str, Path] DEFAULT: './output/'

RAISES DESCRIPTION
RuntimeError

If no responses are available to save

Source code in healthchain/sandbox/sandboxclient.py
def save_results(self, directory: Union[str, Path] = "./output/") -> None:
    """
    Persist queued requests and received responses to disk.

    Requests are written under <directory>/requests and responses under
    <directory>/responses, as XML for SOAP sandboxes and JSON otherwise.

    Args:
        directory: Directory to save data to (default: "./output/")

    Raises:
        RuntimeError: If no responses are available to save
    """
    if not self.responses:
        raise RuntimeError(
            "No responses to save. Send requests first using send_requests()"
        )

    base_dir = Path(directory)
    request_path = ensure_directory_exists(base_dir / "requests")

    # SOAP payloads are serialized as XML; everything else as JSON
    is_soap = self.protocol == ApiProtocol.soap
    extension = "xml" if is_soap else "json"

    if is_soap:
        serialized = [req.model_dump_xml() for req in self.request_data]
    else:
        serialized = [
            req.model_dump(exclude_none=True) for req in self.request_data
        ]

    save_data_to_directory(
        serialized,
        "request",
        self.sandbox_id,
        request_path,
        extension,
    )
    log.info(f"Saved request data at {request_path}/")

    response_path = ensure_directory_exists(base_dir / "responses")
    save_data_to_directory(
        self.responses,
        "response",
        self.sandbox_id,
        response_path,
        extension,
    )
    log.info(f"Saved response data at {response_path}/")

send_requests()

Send all queued requests to the service.

RETURNS DESCRIPTION
List[Dict]

List of response dictionaries

RAISES DESCRIPTION
RuntimeError

If no requests are queued

Source code in healthchain/sandbox/sandboxclient.py
def send_requests(self) -> List[Dict]:
    """
    Send all queued requests to the service.

    Each failed request contributes an empty dict, keeping the response
    list aligned with the request list.

    Returns:
        List of response dictionaries

    Raises:
        RuntimeError: If no requests are queued
    """
    if not self.request_data:
        raise RuntimeError(
            "No requests to send. Load data first using load_from_registry(), load_from_path(), or load_free_text()"
        )

    url = self.api.join(self.endpoint)
    log.info(f"Sending {len(self.request_data)} requests to {url}")

    responses: List[Dict] = []
    with httpx.Client(follow_redirects=True) as client:
        timeout = httpx.Timeout(self.timeout, read=None)

        def post_soap(request) -> Dict:
            # CDA documents are posted as raw XML
            result = client.post(
                url=str(url),
                data=request.document,
                headers={"Content-Type": "text/xml; charset=utf-8"},
                timeout=timeout,
            )
            result.raise_for_status()
            return CdaResponse(document=result.text).model_dump_xml()

        def post_rest(request) -> Dict:
            # REST/CDS Hooks
            log.debug(f"Making POST request to: {url}")
            result = client.post(
                url=str(url),
                json=request.model_dump(exclude_none=True),
                timeout=timeout,
            )
            result.raise_for_status()
            payload = result.json()
            try:
                return CDSResponse(**payload).model_dump(exclude_none=True)
            except Exception:
                # Fallback to raw response if parsing fails
                return payload

        for request in self.request_data:
            handler = post_soap if self.protocol == ApiProtocol.soap else post_rest
            try:
                responses.append(handler(request))
            except httpx.HTTPStatusError as exc:
                try:
                    error_content = exc.response.json()
                except Exception:
                    error_content = exc.response.text
                log.error(
                    f"Error response {exc.response.status_code} while requesting "
                    f"{exc.request.url!r}: {error_content}"
                )
                responses.append({})
            except httpx.TimeoutException as exc:
                # TimeoutException is checked before its parent RequestError
                log.error(f"Request to {exc.request.url!r} timed out!")
                responses.append({})
            except httpx.RequestError as exc:
                log.error(
                    f"An error occurred while requesting {exc.request.url!r}."
                )
                responses.append({})

    self.responses = responses
    log.info(f"Received {len(responses)} responses")

    return responses

CdsDataGenerator

A class to generate CDS (Clinical Decision Support) data based on specified workflows and constraints.

This class provides functionality to generate synthetic FHIR resources for testing CDS systems. It uses registered data generators to create resources like Patients, Encounters, Conditions etc. based on configured workflows. It can also incorporate free text data from CSV files.

ATTRIBUTE DESCRIPTION
registry

A registry mapping generator names to generator classes.

TYPE: dict

mappings

A mapping of workflow names to lists of required generators.

TYPE: dict

generated_data

The most recently generated FHIR resources.

TYPE: Dict[str, Resource]

workflow

The currently active workflow.

TYPE: str

Example

>>> generator = CdsDataGenerator()
>>> generator.set_workflow("encounter_discharge")
>>> data = generator.generate_prefetch(random_seed=42)

Source code in healthchain/sandbox/generators/cdsdatagenerator.py
class CdsDataGenerator:
    """
    A class to generate CDS (Clinical Decision Support) data based on specified workflows and constraints.

    This class provides functionality to generate synthetic FHIR resources for testing CDS systems.
    It uses registered data generators to create resources like Patients, Encounters, Conditions etc.
    based on configured workflows. It can also incorporate free text data from CSV files.

    Attributes:
        registry (dict): A registry mapping generator names to generator classes.
        mappings (dict): A mapping of workflow names to lists of required generators.
        generated_data (Prefetch): The most recently generated FHIR prefetch data.
        workflow: The currently active workflow (None until set_workflow() is called).

    Example:
        >>> generator = CdsDataGenerator()
        >>> generator.set_workflow("encounter_discharge")
        >>> data = generator.generate_prefetch(random_seed=42)
    """

    # TODO: Add ordering and logic so that patient/encounter IDs are passed to subsequent generators
    # TODO: Some of the resources should be allowed to be multiplied

    default_workflow_mappings = {
        Workflow.encounter_discharge: [
            {"generator": "EncounterGenerator"},
            {"generator": "ConditionGenerator"},
            {"generator": "ProcedureGenerator"},
            {"generator": "MedicationRequestGenerator"},
        ],
        Workflow.patient_view: [
            {"generator": "PatientGenerator"},
            {"generator": "EncounterGenerator"},
            {"generator": "ConditionGenerator"},
        ],
    }

    def __init__(self):
        self.registry = generator_registry
        self.mappings = self.default_workflow_mappings
        self.generated_data: Dict[str, Resource] = {}
        # Initialize explicitly so generate_prefetch() fails with the
        # documented ValueError instead of AttributeError when no workflow
        # has been set yet.
        self.workflow = None

    def fetch_generator(self, generator_name: str) -> Callable:
        """
        Fetches a data generator class by its name from the registry.

        Args:
            generator_name (str): The name of the data generator to fetch
                (e.g. "PatientGenerator", "EncounterGenerator")

        Returns:
            Callable: The data generator class used to generate FHIR
                resources, or None if no generator with that name exists.

        Example:
            >>> generator = CdsDataGenerator()
            >>> patient_gen = generator.fetch_generator("PatientGenerator")
            >>> patient = patient_gen.generate()
        """
        return self.registry.get(generator_name)

    def set_workflow(self, workflow: str) -> None:
        """
        Sets the current workflow to be used for data generation.

        Args:
            workflow (str): The name of the workflow to set.
        """
        self.workflow = workflow

    def generate_prefetch(
        self,
        constraints: Optional[list] = None,
        free_text_path: Optional[str] = None,
        column_name: Optional[str] = None,
        random_seed: Optional[int] = None,
    ) -> Prefetch:
        """
        Generates CDS prefetch data for the active workflow.

        For each resource type in the workflow mapping, the corresponding
        registered generator creates a FHIR resource. If a free-text CSV is
        supplied, a DocumentReference containing a randomly chosen row is
        added under the "document" key.

        Args:
            constraints (Optional[list]): Constraints passed through to the
                individual generators.
            free_text_path (Optional[str]): Path to a CSV file with free text
                to include as a DocumentReference. Requires column_name.
            column_name (Optional[str]): CSV column containing the free text.
            random_seed (Optional[int]): Seed for reproducible generation.

        Returns:
            Prefetch: Generated resources keyed by lowercase resource type
                (plus "document" when free text is provided).

        Raises:
            ValueError: If the active workflow is not found in the mappings,
                if a mapped generator is missing from the registry, or if
                free_text_path is given without column_name.
            FileNotFoundError: If free_text_path does not point to a file.
        """
        prefetch = Prefetch(prefetch={})

        if self.workflow not in self.mappings:
            raise ValueError(f"Workflow {self.workflow} not found in mappings")

        for mapping in self.mappings[self.workflow]:
            generator_name = mapping["generator"]
            generator = self.fetch_generator(generator_name)
            # Fail loudly with a clear message instead of AttributeError
            # when a mapped generator was never registered.
            if generator is None:
                raise ValueError(
                    f"Generator {generator_name} not found in registry"
                )
            # Distinct name from the loop variable: previously 'resource'
            # was reused for both the mapping entry and the generated resource.
            resource = generator.generate(
                constraints=constraints, random_seed=random_seed
            )
            prefetch.prefetch[resource.__resource_type__.lower()] = resource

        if free_text_path:
            parsed_free_text = self.free_text_parser(free_text_path, column_name)
            if parsed_free_text:
                prefetch.prefetch["document"] = create_document_reference(
                    data=random.choice(parsed_free_text),
                    content_type="text/plain",
                    status="current",
                    description="Free text created by HealthChain CdsDataGenerator",
                    attachment_title="Free text created by HealthChain CdsDataGenerator",
                )

        self.generated_data = prefetch

        return self.generated_data

    def free_text_parser(self, path_to_csv: str, column_name: str) -> List[str]:
        """
        Parse free text data from a CSV file.

        Reads a CSV file and extracts text data from the specified column;
        the text can later be used to create DocumentReference resources.

        Args:
            path_to_csv (str): Path to the CSV file containing the free text data.
            column_name (str): Name of the column to extract text from.

        Returns:
            List[str]: Text strings extracted from the specified column.

        Raises:
            ValueError: If column_name is not provided.
            FileNotFoundError: If the CSV file does not exist or is not a file.
        """
        # Validate up front: previously this check lived inside the try block,
        # so the ValueError it raised was swallowed by the broad except below
        # and an empty list was silently returned instead.
        if column_name is None:
            raise ValueError(
                "column_name must be provided to parse free text from CSV."
            )

        path = Path(path_to_csv)
        if not path.is_file():
            raise FileNotFoundError(
                f"The file {path_to_csv} does not exist or is not a file."
            )

        text_data = []
        try:
            with path.open(mode="r", newline="") as file:
                for row in csv.DictReader(file):
                    text_data.append(row[column_name])
        except Exception as ex:
            # Best-effort: log the failure and return the rows collected so far.
            logger.error(f"An error occurred: {ex}")

        return text_data

fetch_generator(generator_name)

Fetches a data generator class by its name from the registry.

PARAMETER DESCRIPTION
generator_name

The name of the data generator to fetch (e.g. "PatientGenerator", "EncounterGenerator")

TYPE: str

RETURNS DESCRIPTION
Callable

The data generator class that can be used to generate FHIR resources. Returns None if generator not found.

TYPE: Callable

Example

>>> generator = CdsDataGenerator()
>>> patient_gen = generator.fetch_generator("PatientGenerator")
>>> patient = patient_gen.generate()

Source code in healthchain/sandbox/generators/cdsdatagenerator.py
def fetch_generator(self, generator_name: str) -> Callable:
    """
    Look up a registered data generator class by name.

    Args:
        generator_name (str): Name of the generator to fetch
            (e.g. "PatientGenerator", "EncounterGenerator")

    Returns:
        Callable: The generator class used to produce FHIR resources, or
            None when no generator with that name has been registered.

    Example:
        >>> generator = CdsDataGenerator()
        >>> patient_gen = generator.fetch_generator("PatientGenerator")
        >>> patient = patient_gen.generate()
    """
    # Dict lookup with .get() so a missing name yields None rather than KeyError
    return self.registry.get(generator_name)

free_text_parser(path_to_csv, column_name)

Parse free text data from a CSV file.

This method reads a CSV file and extracts text data from a specified column. The text data can later be used to create DocumentReference resources.

PARAMETER DESCRIPTION
path_to_csv

Path to the CSV file containing the free text data.

TYPE: str

column_name

Name of the column in the CSV file to extract text from.

TYPE: str

RETURNS DESCRIPTION
List[str]

List[str]: List of text strings extracted from the specified column.

RAISES DESCRIPTION
FileNotFoundError

If the specified CSV file does not exist or is not a file.

ValueError

If column_name is not provided.

Exception

If any other error occurs while reading/parsing the CSV file.

Source code in healthchain/sandbox/generators/cdsdatagenerator.py
def free_text_parser(self, path_to_csv: str, column_name: str) -> List[str]:
    """
    Parse free text data from a CSV file.

    Reads a CSV file and extracts text data from the specified column; the
    text can later be used to create DocumentReference resources.

    Args:
        path_to_csv (str): Path to the CSV file containing the free text data.
        column_name (str): Name of the column to extract text from.

    Returns:
        List[str]: Text strings extracted from the specified column.

    Raises:
        ValueError: If column_name is not provided.
        FileNotFoundError: If the CSV file does not exist or is not a file.
    """
    # Validate up front: previously this check lived inside the try block,
    # so the ValueError it raised was swallowed by the broad except below
    # and an empty list was silently returned instead.
    if column_name is None:
        raise ValueError(
            "column_name must be provided to parse free text from CSV."
        )

    path = Path(path_to_csv)
    if not path.is_file():
        raise FileNotFoundError(
            f"The file {path_to_csv} does not exist or is not a file."
        )

    text_data = []
    try:
        with path.open(mode="r", newline="") as file:
            for row in csv.DictReader(file):
                text_data.append(row[column_name])
    except Exception as ex:
        # Best-effort: log the failure and return the rows collected so far.
        logger.error(f"An error occurred: {ex}")

    return text_data

generate_prefetch(constraints=None, free_text_path=None, column_name=None, random_seed=None)

Generates CDS data based on the current workflow, constraints, and optional free text data.

This method generates FHIR resources according to the configured workflow mapping. For each resource type in the workflow, it uses the corresponding generator to create a FHIR resource. If free text data is provided via CSV, it will also generate a DocumentReference containing randomly selected text from the CSV.

PARAMETER DESCRIPTION
constraints

A list of constraints to apply to the data generation. Each constraint should match the format expected by the individual generators.

TYPE: Optional[list] DEFAULT: None

free_text_path

Path to a CSV file containing free text data to be included as DocumentReferences. If provided, column_name must also be specified.

TYPE: Optional[str] DEFAULT: None

column_name

The name of the column in the CSV file containing the free text data to use. Required if free_text_path is provided.

TYPE: Optional[str] DEFAULT: None

random_seed

Seed value for random number generation to ensure reproducible results. If not provided, generation will be truly random.

TYPE: Optional[int] DEFAULT: None

RETURNS DESCRIPTION
Prefetch

A dictionary mapping resource types to generated FHIR resources. The keys are lowercase resource type names (e.g. "patient", "encounter"). If free text is provided, includes a "document" key with a DocumentReference.

TYPE: Prefetch

RAISES DESCRIPTION
ValueError

If the configured workflow is not found in the mappings

FileNotFoundError

If the free_text_path is provided but file not found

ValueError

If free_text_path provided without column_name

Source code in healthchain/sandbox/generators/cdsdatagenerator.py
def generate_prefetch(
    self,
    constraints: Optional[list] = None,
    free_text_path: Optional[str] = None,
    column_name: Optional[str] = None,
    random_seed: Optional[int] = None,
) -> Prefetch:
    """
    Generates CDS prefetch data for the active workflow.

    For each resource type in the workflow mapping, the corresponding
    registered generator creates a FHIR resource. If a free-text CSV is
    supplied, a DocumentReference containing a randomly chosen row is added
    under the "document" key.

    Args:
        constraints (Optional[list]): Constraints passed through to the
            individual generators.
        free_text_path (Optional[str]): Path to a CSV file with free text to
            include as a DocumentReference. Requires column_name.
        column_name (Optional[str]): CSV column containing the free text.
        random_seed (Optional[int]): Seed for reproducible generation.

    Returns:
        Prefetch: Generated resources keyed by lowercase resource type
            (plus "document" when free text is provided).

    Raises:
        ValueError: If the active workflow is not found in the mappings or a
            mapped generator is missing from the registry.
        FileNotFoundError: If free_text_path does not point to a file.
    """
    prefetch = Prefetch(prefetch={})

    # Read via getattr so an unset workflow raises the documented ValueError
    # below rather than an AttributeError.
    workflow = getattr(self, "workflow", None)
    if workflow not in self.mappings:
        raise ValueError(f"Workflow {workflow} not found in mappings")

    for mapping in self.mappings[workflow]:
        generator_name = mapping["generator"]
        generator = self.fetch_generator(generator_name)
        # Fail loudly instead of AttributeError when the generator is missing
        if generator is None:
            raise ValueError(f"Generator {generator_name} not found in registry")
        # Distinct name from the loop variable: previously 'resource' was
        # reused for both the mapping entry and the generated resource.
        resource = generator.generate(
            constraints=constraints, random_seed=random_seed
        )
        prefetch.prefetch[resource.__resource_type__.lower()] = resource

    if free_text_path:
        parsed_free_text = self.free_text_parser(free_text_path, column_name)
        if parsed_free_text:
            prefetch.prefetch["document"] = create_document_reference(
                data=random.choice(parsed_free_text),
                content_type="text/plain",
                status="current",
                description="Free text created by HealthChain CdsDataGenerator",
                attachment_title="Free text created by HealthChain CdsDataGenerator",
            )

    self.generated_data = prefetch

    return self.generated_data

set_workflow(workflow)

Sets the current workflow to be used for data generation.

PARAMETER DESCRIPTION
workflow

The name of the workflow to set.

TYPE: str

Source code in healthchain/sandbox/generators/cdsdatagenerator.py
def set_workflow(self, workflow: str) -> None:
    """
    Select the workflow that subsequent data generation will use.

    Args:
        workflow (str): Name of the workflow to activate.
    """
    self.workflow = workflow

CDSRequest

Bases: BaseModel

A model representing the data structure for a CDS service call, triggered by specific hooks within a healthcare application.

ATTRIBUTE DESCRIPTION
hook

The hook that triggered this CDS Service call. For example, 'patient-view'.

TYPE: str

hookInstance

A universally unique identifier for this particular hook call.

TYPE: UUID

fhirServer

The base URL of the CDS Client's FHIR server. This field is required if fhirAuthorization is provided.

TYPE: HttpUrl

fhirAuthorization

Optional authorization details providing a bearer access token for FHIR resources.

TYPE: Optional[FhirAuthorization]

context

Hook-specific contextual data required by the CDS service.

TYPE: Dict[str, Any]

prefetch

Optional FHIR data that was prefetched by the CDS Client.

TYPE: Optional[Dict[str, Any]]

Documentation: https://cds-hooks.org/specification/current/#http-request_1

Source code in healthchain/models/requests/cdsrequest.py
class CDSRequest(BaseModel):
    """
    Data structure for a CDS service call triggered by a specific hook
    within a healthcare application.

    Attributes:
        hook (str): The hook that triggered this CDS Service call, e.g. 'patient-view'.
        hookInstance (UUID): A universally unique identifier for this particular hook call.
        fhirServer (HttpUrl): The base URL of the CDS Client's FHIR server. Required if `fhirAuthorization` is provided.
        fhirAuthorization (Optional[FhirAuthorization]): Optional authorization details providing a bearer access token for FHIR resources.
        context (Dict[str, Any]): Hook-specific contextual data required by the CDS service.
        prefetch (Optional[Dict[str, Any]]): Optional FHIR data that was prefetched by the CDS Client.

    Documentation: https://cds-hooks.org/specification/current/#http-request_1
    """

    hook: str
    hookInstance: str = Field(default_factory=id_generator.generate_random_uuid)
    context: BaseHookContext
    fhirServer: Optional[HttpUrl] = None
    fhirAuthorization: Optional[FHIRAuthorization] = (
        None  # TODO: note this is required if fhirserver is given
    )
    prefetch: Optional[Dict[str, Any]] = (
        None  # fhir resource is passed either thru prefetched template of fhir server
    )
    extension: Optional[List[Dict[str, Any]]] = None

    def model_dump(self, **kwargs):
        """
        Dump the model with nested datetime and bytes values rendered as strings.
        Workaround for Pydantic V2 issue https://github.com/pydantic/pydantic/issues/9571
        For proper JSON serialization, should use model_dump_json() instead when issue is resolved.
        """

        def _stringify(value):
            # Depth-first conversion of datetimes/bytes inside dicts and lists
            if isinstance(value, datetime):
                return value.astimezone().isoformat()
            if isinstance(value, bytes):
                return value.decode("utf-8")
            if isinstance(value, dict):
                return {key: _stringify(item) for key, item in value.items()}
            if isinstance(value, list):
                return [_stringify(item) for item in value]
            return value

        return _stringify(super().model_dump(**kwargs))

model_dump(**kwargs)

Model dump method to convert any nested datetime and byte objects to strings for readability. This is also a workaround to this Pydantic V2 issue https://github.com/pydantic/pydantic/issues/9571 For proper JSON serialization, should use model_dump_json() instead when issue is resolved.

Source code in healthchain/models/requests/cdsrequest.py
def model_dump(self, **kwargs):
    """
    Dump the model with nested datetime and bytes values rendered as strings.
    Workaround for Pydantic V2 issue https://github.com/pydantic/pydantic/issues/9571
    For proper JSON serialization, should use model_dump_json() instead when issue is resolved.
    """

    def _stringify(value):
        # Depth-first conversion of datetimes/bytes inside dicts and lists
        if isinstance(value, datetime):
            return value.astimezone().isoformat()
        if isinstance(value, bytes):
            return value.decode("utf-8")
        if isinstance(value, dict):
            return {key: _stringify(item) for key, item in value.items()}
        if isinstance(value, list):
            return [_stringify(item) for item in value]
        return value

    return _stringify(super().model_dump(**kwargs))

Action

Bases: BaseModel

Within a suggestion, all actions are logically AND'd together, such that a user selecting a suggestion selects all of the actions within it. When a suggestion contains multiple actions, the actions SHOULD be processed as per FHIR's rules for processing transactions with the CDS Client's fhirServer as the base url for the inferred full URL of the transaction bundle entries.

https://cds-hooks.org/specification/current/#action

Source code in healthchain/models/responses/cdsresponse.py
class Action(BaseModel):
    """
    Within a suggestion, all actions are logically AND'd together, such that a user selecting a
    suggestion selects all of the actions within it. When a suggestion contains multiple actions,
    the actions SHOULD be processed as per FHIR's rules for processing transactions with the CDS
    Client's fhirServer as the base url for the inferred full URL of the transaction bundle entries.

    https://cds-hooks.org/specification/current/#action
    """

    type: ActionTypeEnum
    description: str
    resource: Optional[Dict] = None
    resourceId: Optional[str] = None

    @model_validator(mode="after")
    def validate_action_type(self) -> Self:
        """Ensure the payload matches the action type: create/update require
        'resource'; delete requires 'resourceId'.

        Raises ValueError explicitly (instead of the previous `assert`) so
        validation still runs under `python -O`; Pydantic surfaces both as
        ValidationError, so callers are unaffected.
        """
        if self.type in (ActionTypeEnum.create, ActionTypeEnum.update):
            if not self.resource:
                raise ValueError(
                    f"'resource' must be provided when type is '{self.type.value}'"
                )
        elif not self.resourceId:
            raise ValueError(
                f"'resourceId' must be provided when type is '{self.type.value}'"
            )

        return self

ActionTypeEnum

Bases: str, Enum

The type of action being performed

Source code in healthchain/models/responses/cdsresponse.py
class ActionTypeEnum(str, Enum):
    """Kind of FHIR operation a CDS action performs."""

    create = "create"  # add a new resource
    update = "update"  # modify an existing resource
    delete = "delete"  # remove an existing resource

CDSResponse

Bases: BaseModel

Represents the response from a CDS service.

This class models the structure of a CDS Hooks response, which includes cards for displaying information or suggestions to the user, and optional system actions that can be executed automatically.

ATTRIBUTE DESCRIPTION
cards

A list of Card objects to be displayed to the end user. Default is an empty list.

TYPE: List[Card]

systemActions

A list of Action objects representing actions that the CDS Client should execute as part of performing the decision support requested. This field is optional.

TYPE: Optional[List[Action]]

For more information, see: https://cds-hooks.org/specification/current/#cds-service-response

Source code in healthchain/models/responses/cdsresponse.py
class CDSResponse(BaseModel):
    """
    Represents the response from a CDS service.

    This class models the structure of a CDS Hooks response, which includes
    cards for displaying information or suggestions to the user, and optional
    system actions that can be executed automatically.

    Attributes:
        cards (List[Card]): A list of Card objects to be displayed to the end user.
            Default is an empty list.
        systemActions (Optional[List[Action]]): A list of Action objects representing
            actions that the CDS Client should execute as part of performing
            the decision support requested. This field is optional.

    For more information, see:
    https://cds-hooks.org/specification/current/#cds-service-response
    """

    # NOTE: the mutable `[]` default is safe on a Pydantic model — Pydantic
    # copies default values per instance, unlike plain class attributes.
    cards: List[Card] = []
    systemActions: Optional[List[Action]] = None

Card

Bases: BaseModel

Cards can provide a combination of information (for reading), suggested actions (to be applied if a user selects them), and links (to launch an app if the user selects them). The CDS Client decides how to display cards, but this specification recommends displaying suggestions using buttons, and links using underlined text.

https://cds-hooks.org/specification/current/#card-attributes

Source code in healthchain/models/responses/cdsresponse.py
class Card(BaseModel):
    """
    Cards can provide a combination of information (for reading), suggested actions
    (to be applied if a user selects them), and links (to launch an app if the user selects them).
    The CDS Client decides how to display cards, but this specification recommends displaying suggestions
    using buttons, and links using underlined text.

    https://cds-hooks.org/specification/current/#card-attributes
    """

    # Spec caps the summary at 140 characters.
    summary: str = Field(..., max_length=140)
    indicator: IndicatorEnum
    source: Source
    uuid: Optional[str] = None
    detail: Optional[str] = None
    suggestions: Optional[List[Suggestion]] = None
    selectionBehavior: Optional[SelectionBehaviorEnum] = None
    overrideReasons: Optional[List[SimpleCoding]] = None
    links: Optional[List[Link]] = None

    @model_validator(mode="after")
    def validate_suggestions(self) -> Self:
        """Enforce the spec rule that selectionBehavior accompanies suggestions.

        Raises:
            ValueError: If 'suggestions' is present but 'selectionBehavior'
                is not set. Pydantic surfaces this as a ValidationError.
        """
        if self.suggestions is not None and not self.selectionBehavior:
            # Fix: the original iterated SelectionBehaviorEnum.value, which
            # raises AttributeError on the enum class; iterate members instead.
            # Also use an explicit raise rather than assert (stripped under -O).
            raise ValueError(
                "'selectionBehavior' must be given if 'suggestions' is present! "
                f"Choose from {[v.value for v in SelectionBehaviorEnum]}"
            )
        return self

IndicatorEnum

Bases: str, Enum

Urgency/importance of what Card conveys. Allowed values, in order of increasing urgency, are: info, warning, critical. The CDS Client MAY use this field to help make UI display decisions such as sort order or coloring.

Source code in healthchain/models/responses/cdsresponse.py
class IndicatorEnum(str, Enum):
    """
    Urgency/importance of the information a Card conveys.

    Allowed values, ordered from least to most urgent: ``info``, ``warning``,
    ``critical``. A CDS Client MAY consult this field when making UI display
    decisions such as sort order or coloring.
    """

    info = "info"
    warning = "warning"
    critical = "critical"

Link

Bases: BaseModel

  • CDS Client support for appContext requires additional coordination with the authorization server that is not described or specified in CDS Hooks nor SMART.

  • Autolaunchable is experimental

https://cds-hooks.org/specification/current/#link

Source code in healthchain/models/responses/cdsresponse.py
class Link(BaseModel):
    """
    * CDS Client support for appContext requires additional coordination with the authorization
    server that is not described or specified in CDS Hooks nor SMART.

    * Autolaunchable is experimental

    https://cds-hooks.org/specification/current/#link
    """

    label: str
    url: HttpUrl
    type: LinkTypeEnum
    appContext: Optional[str] = None
    # Fix: without an explicit default, Pydantic treats an Optional field as
    # required; the spec marks autoLaunchable as optional, so default to None.
    autoLaunchable: Optional[bool] = None

    @model_validator(mode="after")
    def validate_link(self) -> Self:
        """Ensure appContext is only used with SMART app launch links.

        Raises:
            ValueError: If 'appContext' is set but 'type' is not 'smart'.
                Pydantic surfaces this as a ValidationError.
        """
        if self.appContext and self.type != LinkTypeEnum.smart:
            # Explicit raise instead of assert, which is stripped under -O.
            raise ValueError("'type' must be 'smart' for appContext to be valued.")

        return self

LinkTypeEnum

Bases: str, Enum

The type of the given URL. There are two possible values for this field. A type of absolute indicates that the URL is absolute and should be treated as-is. A type of smart indicates that the URL is a SMART app launch URL and the CDS Client should ensure the SMART app launch URL is populated with the appropriate SMART launch parameters.

Source code in healthchain/models/responses/cdsresponse.py
class LinkTypeEnum(str, Enum):
    """
    The type of the given URL.

    Two values are possible: ``absolute`` means the URL should be treated
    as-is; ``smart`` means the URL is a SMART app launch URL and the CDS
    Client should populate it with the appropriate SMART launch parameters.
    """

    absolute = "absolute"
    smart = "smart"

SelectionBehaviorEnum

Bases: str, Enum

Describes the intended selection behavior of the suggestions in the card. Allowed values are: at-most-one, indicating that the user may choose none or at most one of the suggestions; any, indicating that the end user may choose any number of suggestions including none of them and all of them. CDS Clients that do not understand the value MUST treat the card as an error.

Source code in healthchain/models/responses/cdsresponse.py
class SelectionBehaviorEnum(str, Enum):
    """
    Intended selection behavior of the suggestions in a card.

    ``at-most-one``: the user may choose none or at most one suggestion.
    ``any``: the user may choose any number of suggestions, including none
    or all of them. CDS Clients that do not understand the value MUST treat
    the card as an error.
    """

    at_most_one = "at-most-one"
    any = "any"

SimpleCoding

Bases: BaseModel

The Coding data type captures the concept of a code. This coding type is a standalone data type in CDS Hooks modeled after a trimmed down version of the FHIR Coding data type.

Source code in healthchain/models/responses/cdsresponse.py
class SimpleCoding(BaseModel):
    """
    Captures the concept of a code.

    A standalone CDS Hooks data type modeled after a trimmed-down version of
    the FHIR Coding data type: a required code and code system, with an
    optional human-readable display string.
    """

    code: str
    system: str
    display: Optional[str] = None

Source

Bases: BaseModel

Grouping structure for the Source of the information displayed on this card. The source should be the primary source of guidance for the decision support Card represents.

https://cds-hooks.org/specification/current/#source

Source code in healthchain/models/responses/cdsresponse.py
class Source(BaseModel):
    """
    Grouping structure for the source of the information displayed on a card.

    The source should be the primary source of guidance for the decision
    support the card represents: a human-readable label plus optional URL,
    icon, and topic coding.

    https://cds-hooks.org/specification/current/#source
    """

    label: str
    url: Optional[HttpUrl] = None
    icon: Optional[HttpUrl] = None
    topic: Optional[SimpleCoding] = None

Suggestion

Bases: BaseModel

Allows a service to suggest a set of changes in the context of the current activity (e.g. changing the dose of a medication currently being prescribed, for the order-sign activity). If suggestions are present, selectionBehavior MUST also be provided.

https://cds-hooks.org/specification/current/#suggestion

Source code in healthchain/models/responses/cdsresponse.py
class Suggestion(BaseModel):
    """
    Allows a service to suggest a set of changes in the context of the current activity
    (e.g. changing the dose of a medication currently being prescribed, for the order-sign activity).
    If suggestions are present, selectionBehavior MUST also be provided.

    https://cds-hooks.org/specification/current/#suggestion
    """

    label: str
    uuid: Optional[str] = None
    # Fix: without an explicit default, Pydantic treats an Optional field as
    # required; this field is optional per the spec, so default to None.
    isRecommended: Optional[bool] = None
    actions: Optional[List[Action]] = []

CdaRequest

Bases: BaseModel

Source code in healthchain/models/requests/cdarequest.py
class CdaRequest(BaseModel):
    """
    Request wrapper for a CDA document exchanged as XML.

    Attributes:
        document: The XML payload as a string.
        session_id: Optional session identifier.
        work_type: Optional work type for the request.
        organization_id: Optional organization identifier.
    """

    document: str
    session_id: Optional[str] = None
    work_type: Optional[str] = None
    organization_id: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Dict):
        """
        Loads data from dict (xmltodict format)
        """
        return cls(document=xmltodict.unparse(data))

    def model_dump(self, *args, **kwargs) -> Dict:
        """
        Dumps document as dict with xmltodict
        """
        return xmltodict.parse(self.document)

    def model_dump_xml(self, *args, **kwargs) -> str:
        """
        Decodes and dumps the document as an xml string.

        Looks up the base64-encoded payload under the 'urn:Document' key;
        logs a warning and returns an empty string if it is not found.
        """
        xml_dict = xmltodict.parse(self.document)
        document = search_key(xml_dict, "urn:Document")
        if document is None:
            # Fix: the closing quote was missing from this log message.
            log.warning("Couldn't find document under namespace 'urn:Document'")
            return ""

        return base64.b64decode(document).decode("UTF-8")

from_dict(data) classmethod

Loads data from dict (xmltodict format)

Source code in healthchain/models/requests/cdarequest.py
@classmethod
def from_dict(cls, data: Dict):
    """Build an instance from an xmltodict-style dictionary."""
    xml_string = xmltodict.unparse(data)
    return cls(document=xml_string)

model_dump(*args, **kwargs)

Dumps document as dict with xmltodict

Source code in healthchain/models/requests/cdarequest.py
def model_dump(self, *args, **kwargs) -> Dict:
    """
    Dumps document as dict with xmltodict
    """
    return xmltodict.parse(self.document)

model_dump_xml(*args, **kwargs)

Decodes and dumps document as an xml string

Source code in healthchain/models/requests/cdarequest.py
def model_dump_xml(self, *args, **kwargs) -> str:
    """
    Decodes and dumps document as an xml string
    """
    xml_dict = xmltodict.parse(self.document)
    document = search_key(xml_dict, "urn:Document")
    if document is None:
        log.warning("Couldn't find document under namespace 'urn:Document")
        return ""

    cda = base64.b64decode(document).decode("UTF-8")

    return cda

CdaResponse

Bases: BaseModel

Source code in healthchain/models/responses/cdaresponse.py
class CdaResponse(BaseModel):
    """
    Response wrapper for a CDA document exchanged as XML.

    Attributes:
        document: The XML payload as a string.
        error: Optional error description.
    """

    document: str
    error: Optional[str] = None

    @classmethod
    def from_dict(cls, data: Dict):
        """
        Loads data from dict (xmltodict format)
        """
        return cls(document=xmltodict.unparse(data))

    def model_dump(self, *args, **kwargs) -> Dict:
        """
        Dumps document as dict with xmltodict
        """
        return xmltodict.parse(self.document)

    def model_dump_xml(self, *args, **kwargs) -> str:
        """
        Decodes and dumps the document as an xml string.

        Looks up the base64-encoded payload under the 'tns:Document' key;
        logs a warning and returns an empty string if it is not found.
        """
        xml_dict = xmltodict.parse(self.document)
        document = search_key(xml_dict, "tns:Document")
        if document is None:
            # Fix: the closing quote was missing from this log message.
            log.warning("Couldn't find document under namespace 'tns:Document'")
            return ""

        return base64.b64decode(document).decode("UTF-8")

from_dict(data) classmethod

Loads data from dict (xmltodict format)

Source code in healthchain/models/responses/cdaresponse.py
@classmethod
def from_dict(cls, data: Dict):
    """Build an instance from an xmltodict-style dictionary."""
    xml_string = xmltodict.unparse(data)
    return cls(document=xml_string)

model_dump(*args, **kwargs)

Dumps document as dict with xmltodict

Source code in healthchain/models/responses/cdaresponse.py
def model_dump(self, *args, **kwargs) -> Dict:
    """
    Dumps document as dict with xmltodict
    """
    return xmltodict.parse(self.document)

model_dump_xml(*args, **kwargs)

Decodes and dumps document as an xml string

Source code in healthchain/models/responses/cdaresponse.py
def model_dump_xml(self, *args, **kwargs) -> str:
    """
    Decodes and dumps document as an xml string
    """
    xml_dict = xmltodict.parse(self.document)
    document = search_key(xml_dict, "tns:Document")
    if document is None:
        log.warning("Couldn't find document under namespace 'tns:Document")
        return ""

    cda = base64.b64decode(document).decode("UTF-8")

    return cda