from typing import Optional
from uuid import UUID
import requests
from sempy._utils._log import log
import pandas as pd
from sempy_labs._helper_functions import (
    resolve_lakehouse_name_and_id,
    resolve_workspace_id,
    resolve_lakehouse_id,
    _create_dataframe,
    _update_dataframe_datatypes,
    _base_api,
    resolve_workspace_name_and_id,
)


def _get_headers():

    import notebookutils

    # Acquire a OneLake storage token and build the Authorization header used to
    # call the lakehouse's Unity Catalog-compatible table endpoint.
    token = notebookutils.credentials.getToken("storage")
    headers = {"Authorization": f"Bearer {token}"}
    return headers


@log
def list_schemas(
    lakehouse: Optional[str | UUID] = None, workspace: Optional[str | UUID] = None
) -> pd.DataFrame:
    """
    Shows the schemas within the specified lakehouse.

    Parameters
    ----------
    lakehouse : str | uuid.UUID, default=None
        The lakehouse name or ID. Defaults to None, which resolves to the
        lakehouse attached to the notebook.
    workspace : str | uuid.UUID, default=None
        The workspace name or ID. Defaults to None, which resolves to the
        workspace of the notebook.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the schemas within the lakehouse.
    """

    columns = {
        "Schema Name": "str",
        # "Created At": "datetime",
        # "Updated At": "datetime"
    }
    df = _create_dataframe(columns=columns)
    workspace_id = resolve_workspace_id(workspace)
    item_id = resolve_lakehouse_id(lakehouse, workspace)

    # The OneLake table endpoint exposes a Unity Catalog-compatible API in which
    # the lakehouse ID serves as the catalog name.
    response = requests.get(
        f"https://onelake.table.fabric.microsoft.com/delta/{workspace_id}/{item_id}/api/2.1/unity-catalog/schemas?catalog_name={item_id}",
        headers=_get_headers(),
    )
    response.raise_for_status()

    rows = []
    for s in response.json().get("schemas", []):
        rows.append(
            {
                "Schema Name": s.get("name"),
                # "Created At": s.get("created_at"),
                # "Updated At": s.get("updated_at"),
            }
        )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))
        _update_dataframe_datatypes(df, columns)

    return df


@log
def list_tables(
    lakehouse: Optional[str | UUID] = None,
    workspace: Optional[str | UUID] = None,
    schema: Optional[str] = None,
) -> pd.DataFrame:
    """
    Shows the tables within the specified lakehouse.

    Parameters
    ----------
    lakehouse : str | uuid.UUID, default=None
        The lakehouse name or ID. Defaults to None, which resolves to the
        lakehouse attached to the notebook.
    workspace : str | uuid.UUID, default=None
        The workspace name or ID. Defaults to None, which resolves to the
        workspace of the notebook.
    schema : str, default=None
        For schema-enabled lakehouses, restricts the result to a single schema.

    Returns
    -------
    pandas.DataFrame
        A pandas dataframe showing the tables within the lakehouse.
    """

    (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace)
    (item_name, item_id) = resolve_lakehouse_name_and_id(lakehouse, workspace)

    # A schema-enabled lakehouse reports a default schema in its properties.
    response = _base_api(request=f"v1/workspaces/{workspace_id}/lakehouses/{item_id}")
    default_schema = response.json().get("properties", {}).get("defaultSchema")
    schema_enabled = bool(default_schema)

    columns = {
        "Workspace Name": "str",
        "Lakehouse Name": "str",
        "Table Name": "str",
        "Schema Name": "str",
        "Format": "str",
        "Type": "str",
        "Location": "str",
    }
    df = _create_dataframe(columns=columns)

    rows = []
    if schema_enabled:
        schemas = list_schemas(lakehouse=lakehouse, workspace=workspace)
        if schema is not None:
            # Only enumerate the requested schema when one is specified.
            schemas = schemas[schemas["Schema Name"] == schema]
        # Loop through schemas
        for _, r in schemas.iterrows():
            schema_name = r["Schema Name"]
            response = requests.get(
                f"https://onelake.table.fabric.microsoft.com/delta/{workspace_id}/{item_id}/api/2.1/unity-catalog/tables?catalog_name={item_id}&schema_name={schema_name}",
                headers=_get_headers(),
            )
            response.raise_for_status()
            # Loop through tables
            for t in response.json().get("tables", []):
                rows.append(
                    {
                        "Workspace Name": workspace_name,
                        "Lakehouse Name": item_name,
                        "Table Name": t.get("name"),
                        "Schema Name": schema_name,
                        "Format": t.get("data_source_format"),
                        "Type": None,
                        "Location": t.get("storage_location"),
                    }
                )
    else:
        responses = _base_api(
            request=f"v1/workspaces/{workspace_id}/lakehouses/{item_id}/tables",
            uses_pagination=True,
            client="fabric_sp",
        )
        for r in responses:
            for i in r.get("data", []):
                rows.append(
                    {
                        "Workspace Name": workspace_name,
                        "Lakehouse Name": item_name,
                        "Table Name": i.get("name"),
                        "Schema Name": None,
                        "Format": i.get("format"),
                        "Type": i.get("type"),
                        "Location": i.get("location"),
                    }
                )

    if rows:
        df = pd.DataFrame(rows, columns=list(columns.keys()))

    return df
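
For reference, a minimal usage sketch from a Fabric notebook, assuming these functions are surfaced from the package; the `sempy_labs.lakehouse` import path and the "MyLakehouse"/"MyWorkspace"/"dbo" names are illustrative assumptions, not part of this change:

# Illustrative only: the import path and item names below are assumptions.
from sempy_labs.lakehouse import list_schemas, list_tables

# Schemas of a schema-enabled lakehouse.
schemas_df = list_schemas(lakehouse="MyLakehouse", workspace="MyWorkspace")

# All tables, or only those in the "dbo" schema of a schema-enabled lakehouse.
tables_df = list_tables(lakehouse="MyLakehouse", workspace="MyWorkspace", schema="dbo")

On a lakehouse without schema support, the tables are fetched through the Fabric lakehouse tables API rather than the OneLake endpoint, and the "Schema Name" column is returned as None.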