@@ -1,5 +1,6 @@
 import sempy.fabric as fabric
 from sempy_labs._helper_functions import (
+    resolve_workspace_id,
     resolve_workspace_name_and_id,
     create_relationship_name,
     format_dax_object_name,
@@ -616,33 +617,50 @@ def list_lakehouses(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
         "SQL Endpoint Connection String": "string",
         "SQL Endpoint ID": "string",
         "SQL Endpoint Provisioning Status": "string",
+        "Schema Enabled": "bool",
+        "Default Schema": "string",
+        "Sensitivity Label Id": "string",
     }
     df = _create_dataframe(columns=columns)

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/lakehouses",
         uses_pagination=True,
         client="fabric_sp",
     )

+    rows = []
     for r in responses:
         for v in r.get("value", []):
             prop = v.get("properties", {})
             sqlEPProp = prop.get("sqlEndpointProperties", {})
+            default_schema = prop.get("defaultSchema", None)

-            new_data = {
-                "Lakehouse Name": v.get("displayName"),
-                "Lakehouse ID": v.get("id"),
-                "Description": v.get("description"),
-                "OneLake Tables Path": prop.get("oneLakeTablesPath"),
-                "OneLake Files Path": prop.get("oneLakeFilesPath"),
-                "SQL Endpoint Connection String": sqlEPProp.get("connectionString"),
-                "SQL Endpoint ID": sqlEPProp.get("id"),
-                "SQL Endpoint Provisioning Status": sqlEPProp.get("provisioningStatus"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            rows.append(
+                {
+                    "Lakehouse Name": v.get("displayName"),
+                    "Lakehouse ID": v.get("id"),
+                    "Description": v.get("description"),
+                    "OneLake Tables Path": prop.get("oneLakeTablesPath"),
+                    "OneLake Files Path": prop.get("oneLakeFilesPath"),
+                    "SQL Endpoint Connection String": sqlEPProp.get("connectionString"),
+                    "SQL Endpoint ID": sqlEPProp.get("id"),
+                    "SQL Endpoint Provisioning Status": sqlEPProp.get(
+                        "provisioningStatus"
+                    ),
+                    "Schema Enabled": True if default_schema else False,
+                    "Default Schema": default_schema,
+                    "Sensitivity Label Id": v.get("sensitivityLabel", {}).get(
+                        "sensitivityLabelId"
+                    ),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))
+        _update_dataframe_datatypes(dataframe=df, column_map=columns)

     return df

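The list_lakehouses hunk above replaces per-iteration pd.concat with a rows list that is materialized into a DataFrame once. pd.concat inside a loop copies the accumulated frame on every append, so building n rows costs O(n²); collecting plain dicts and constructing once is O(n). Below is a minimal standalone sketch of that pattern. The sample records and the two-column map are made up for illustration, and a plain astype call stands in for the repo's _update_dataframe_datatypes helper:

import pandas as pd

# Illustrative column map in the same shape the function uses (name -> dtype).
columns = {"Lakehouse Name": "string", "Schema Enabled": "bool"}

# Fake records standing in for the paginated /lakehouses API responses.
records = [
    {"displayName": "sales", "properties": {"defaultSchema": "dbo"}},
    {"displayName": "staging", "properties": {}},
]

rows = []
for v in records:
    default_schema = v.get("properties", {}).get("defaultSchema")
    rows.append(
        {
            "Lakehouse Name": v.get("displayName"),
            "Schema Enabled": bool(default_schema),
        }
    )

# One O(n) construction instead of n increasingly expensive concats; astype
# stands in for _update_dataframe_datatypes here.
df = pd.DataFrame(rows, columns=list(columns.keys())).astype(columns)
print(df)
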
@@ -672,20 +690,25 @@ def list_datamarts(workspace: Optional[str | UUID] = None) -> pd.DataFrame:
     }
     df = _create_dataframe(columns=columns)

-    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
+    workspace_id = resolve_workspace_id(workspace)

     responses = _base_api(
         request=f"/v1/workspaces/{workspace_id}/datamarts", uses_pagination=True
     )

+    rows = []
     for r in responses:
         for v in r.get("value", []):
-            new_data = {
-                "Datamart Name": v.get("displayName"),
-                "Datamart ID": v.get("id"),
-                "Description": v.get("description"),
-            }
-            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+            rows.append(
+                {
+                    "Datamart Name": v.get("displayName"),
+                    "Datamart ID": v.get("id"),
+                    "Description": v.get("description"),
+                }
+            )
+
+    if rows:
+        df = pd.DataFrame(rows, columns=list(columns.keys()))

     return df

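list_datamarts gets the same treatment, plus the same helper swap: the function only needs the workspace ID to build the request URL, so resolve_workspace_id avoids resolving the display name as well. The `if rows:` guard matters for the empty case: when the API returns nothing, df stays the empty, correctly-typed frame from _create_dataframe instead of being rebuilt without dtypes. A sketch of that behavior, with a hypothetical make_empty_frame standing in for the repo's _create_dataframe:

import pandas as pd

columns = {
    "Datamart Name": "string",
    "Datamart ID": "string",
    "Description": "string",
}

def make_empty_frame(columns: dict) -> pd.DataFrame:
    # Hypothetical stand-in for sempy_labs' _create_dataframe: an empty
    # frame whose columns already carry the declared dtypes.
    return pd.DataFrame(
        {name: pd.Series(dtype=dtype) for name, dtype in columns.items()}
    )

df = make_empty_frame(columns)

rows = []  # the workspace returned no datamarts
if rows:
    df = pd.DataFrame(rows, columns=list(columns.keys()))

print(df.dtypes)  # all three columns keep dtype `string` in the empty case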