Skip to content
64 changes: 8 additions & 56 deletions api/apps/kb_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
import numpy as np

from api.db import LLMType
from api.db.services import duplicate_name
from api.db.services.llm_service import LLMBundle
from api.db.services.document_service import DocumentService, queue_raptor_o_graphrag_tasks
from api.db.services.file2document_service import File2DocumentService
Expand All @@ -31,7 +30,6 @@
from api.db.services.task_service import TaskService, GRAPH_RAPTOR_FAKE_DOC_ID
from api.db.services.user_service import TenantService, UserTenantService
from api.utils.api_utils import get_error_data_result, server_error_response, get_data_error_result, validate_request, not_allowed_parameters
from common.misc_utils import get_uuid
from api.db import PipelineTaskType, StatusEnum, FileSource, VALID_FILE_TYPES, VALID_TASK_STATUS
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.db_models import File
Expand All @@ -50,63 +48,17 @@
@validate_request("name")
def create():
req = request.json
dataset_name = req["name"]
if not isinstance(dataset_name, str):
return get_data_error_result(message="Dataset name must be string.")
if dataset_name.strip() == "":
return get_data_error_result(message="Dataset name can't be empty.")
if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
return get_data_error_result(
message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")

dataset_name = dataset_name.strip()
dataset_name = duplicate_name(
KnowledgebaseService.query,
name=dataset_name,
tenant_id=current_user.id,
status=StatusEnum.VALID.value)
req = KnowledgebaseService.create_with_name(
name = req.pop("name", None),
tenant_id = current_user.id,
parser_id = req.pop("parser_id", None),
**req
)

try:
req["id"] = get_uuid()
req["name"] = dataset_name
req["tenant_id"] = current_user.id
req["created_by"] = current_user.id
if not req.get("parser_id"):
req["parser_id"] = "naive"
e, t = TenantService.get_by_id(current_user.id)
if not e:
return get_data_error_result(message="Tenant not found.")

req["parser_config"] = {
"layout_recognize": "DeepDOC",
"chunk_token_num": 512,
"delimiter": "\n",
"auto_keywords": 0,
"auto_questions": 0,
"html4excel": False,
"topn_tags": 3,
"raptor": {
"use_raptor": True,
"prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
"max_token": 256,
"threshold": 0.1,
"max_cluster": 64,
"random_seed": 0
},
"graphrag": {
"use_graphrag": True,
"entity_types": [
"organization",
"person",
"geo",
"event",
"category"
],
"method": "light"
}
}
if not KnowledgebaseService.save(**req):
return get_data_error_result()
return get_json_result(data={"kb_id": req["id"]})
return get_json_result(data={"kb_id":req["id"]})
except Exception as e:
return server_error_response(e)

Expand Down
76 changes: 36 additions & 40 deletions api/apps/sdk/dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,10 @@
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.user_service import TenantService
from common.misc_utils import get_uuid
from api.utils.api_utils import (
deep_merge,
get_error_argument_result,
get_error_data_result,
get_error_operating_result,
get_error_permission_result,
get_parser_config,
get_result,
Expand Down Expand Up @@ -80,29 +78,28 @@ def create(tenant_id):
properties:
name:
type: string
description: Name of the dataset.
description: Dataset name (required).
avatar:
type: string
description: Base64 encoding of the avatar.
description: Optional base64-encoded avatar image.
description:
type: string
description: Description of the dataset.
description: Optional dataset description.
embedding_model:
type: string
description: Embedding model Name.
description: Optional embedding model name; if omitted, the tenant's default embedding model is used.
permission:
type: string
enum: ['me', 'team']
description: Dataset permission.
description: Visibility of the dataset (private to me or shared with team).
chunk_method:
type: string
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
"picture", "presentation", "qa", "table", "tag"
]
description: Chunking method.
"picture", "presentation", "qa", "table", "tag"]
description: Chunking method; if omitted, defaults to "naive".
parser_config:
type: object
description: Parser configuration.
description: Optional parser configuration; server-side defaults will be applied.
responses:
200:
description: Successful operation.
Expand All @@ -117,44 +114,43 @@ def create(tenant_id):
# |----------------|-------------|
# | embedding_model| embd_id |
# | chunk_method | parser_id |

req, err = validate_and_parse_json_request(request, CreateDatasetReq)
if err is not None:
return get_error_argument_result(err)

try:
if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")

req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
req["id"] = get_uuid()
req["tenant_id"] = tenant_id
req["created_by"] = tenant_id

ok, t = TenantService.get_by_id(tenant_id)

req = KnowledgebaseService.create_with_name(
name = req.pop("name", None),
tenant_id = tenant_id,
parser_id = req.pop("parser_id", None),
**req
)

# Insert embedding model(embd id)
ok, t = TenantService.get_by_id(tenant_id)
if not ok:
return get_error_permission_result(message="Tenant not found")
if not req.get("embd_id"):
req["embd_id"] = t.embd_id
else:
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
if not ok:
return get_error_permission_result(message="Tenant not found")

if not req.get("embd_id"):
req["embd_id"] = t.embd_id
else:
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
if not ok:
return err

if not KnowledgebaseService.save(**req):
return get_error_data_result(message="Create dataset error.(Database error)")
return err

ok, k = KnowledgebaseService.get_by_id(req["id"])
if not ok:
return get_error_data_result(message="Dataset created failed")

response_data = remap_dictionary_keys(k.to_dict())
return get_result(data=response_data)
except OperationalError as e:
try:
if not KnowledgebaseService.save(**req):
return get_error_data_result()
ok, k = KnowledgebaseService.get_by_id(req["id"])
if not ok:
return get_error_data_result(message="Dataset created failed")

response_data = remap_dictionary_keys(k.to_dict())
return get_result(data=response_data)
except Exception as e:
logging.exception(e)
return get_error_data_result(message="Database operation failed")


@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
@token_required
def delete(tenant_id):
Expand Down
64 changes: 63 additions & 1 deletion api/db/services/knowledgebase_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,11 @@
from api.db.db_models import DB, Document, Knowledgebase, User, UserTenant, UserCanvas
from api.db.services.common_service import CommonService
from common.time_utils import current_timestamp, datetime_format

from api.db.services import duplicate_name
from api.db.services.user_service import TenantService
from common.misc_utils import get_uuid
from api.constants import DATASET_NAME_LIMIT
from api.utils.api_utils import get_parser_config, get_data_error_result

class KnowledgebaseService(CommonService):
"""Service class for managing knowledge base operations.
Expand Down Expand Up @@ -363,6 +367,64 @@ def get_all_ids(cls):
# List of all knowledge base IDs
return [m["id"] for m in cls.model.select(cls.model.id).dicts()]


@classmethod
@DB.connection_context()
def create_with_name(
    cls,
    *,
    name: str,
    tenant_id: str,
    parser_id: str | None = None,
    **kwargs
):
    """Validate inputs and build the creation payload for a knowledgebase.

    Despite the name, this method does not persist anything itself: it
    validates and de-duplicates the dataset name within the tenant,
    verifies the tenant exists, and returns a payload ``dict`` (with a
    fresh ``id`` and a resolved ``parser_config``) that callers are
    expected to pass to ``cls.save(**payload)``.

    Args:
        name: Proposed dataset name; must be a non-empty string whose
            UTF-8 encoding fits within ``DATASET_NAME_LIMIT`` bytes.
        tenant_id: Owning tenant; also recorded as ``created_by``.
        parser_id: Chunking method id; falls back to ``"naive"`` in the
            payload when falsy.
        **kwargs: Extra Knowledgebase fields merged into the payload
            (e.g. ``description``, ``permission``, ``parser_config``).

    Returns:
        dict: The creation payload on success.

    NOTE(review): the failure paths are inconsistent — name-validation
    failures return a Flask error response from ``get_data_error_result``,
    while a missing tenant returns a ``(False, str)`` tuple. Callers that
    subscript the result (``result["id"]``) will break on either failure
    shape; the previous docstring's claimed ``(ok, model_or_msg)``
    contract is not what this code implements. Confirm the intended
    contract with callers before relying on error handling here.
    """
    # Name must be a string (reject e.g. ints or None early).
    if not isinstance(name, str):
        return get_data_error_result(message="Dataset name must be string.")
    dataset_name = name.strip()
    if dataset_name == "":
        return get_data_error_result(message="Dataset name can't be empty.")
    # NOTE(review): the limit is enforced on UTF-8 *bytes*, but the error
    # message reports the *character* count — these differ for non-ASCII
    # names; confirm which is intended.
    if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
        return get_data_error_result(message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")

    # De-duplicate the name within this tenant (duplicate_name probes
    # cls.query with the given filters and returns a non-colliding name).
    dataset_name = duplicate_name(
        cls.query,
        name=dataset_name,
        tenant_id=tenant_id,
        status=StatusEnum.VALID.value,
    )

    # Verify tenant exists before building the payload.
    ok, _t = TenantService.get_by_id(tenant_id)
    if not ok:
        return False, "Tenant not found."

    # Assemble the row payload; kwargs may supply additional columns.
    kb_id = get_uuid()
    payload = {
        "id": kb_id,
        "name": dataset_name,
        "tenant_id": tenant_id,
        "created_by": tenant_id,
        "parser_id": (parser_id or "naive"),
        **kwargs
    }

    # Resolve parser_config from defaults plus any caller-supplied config
    # (presumably merged by get_parser_config — verify its semantics).
    # NOTE(review): the raw ``parser_id`` (possibly None) is passed here,
    # not the ``"naive"`` fallback stored in the payload above — confirm
    # the defaults resolved for None match the "naive" defaults.
    payload["parser_config"] = get_parser_config(parser_id, kwargs.get("parser_config"))
    return payload


@classmethod
@DB.connection_context()
def get_list(cls, joined_tenant_ids, user_id,
Expand Down
30 changes: 28 additions & 2 deletions api/utils/api_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,8 +419,34 @@ def get_parser_config(chunk_method, parser_config):

# Define default configurations for each chunking method
key_mapping = {
"naive": {"chunk_token_num": 512, "delimiter": r"\n", "html4excel": False, "layout_recognize": "DeepDOC",
"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
"naive": {
"layout_recognize": "DeepDOC",
"chunk_token_num": 512,
"delimiter": "\n",
"auto_keywords": 0,
"auto_questions": 0,
"html4excel": False,
"topn_tags": 3,
"raptor": {
"use_raptor": True,
"prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
"max_token": 256,
"threshold": 0.1,
"max_cluster": 64,
"random_seed": 0,
},
"graphrag": {
"use_graphrag": True,
"entity_types": [
"organization",
"person",
"geo",
"event",
"category",
],
"method": "light",
},
},
"qa": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
"tag": None,
"resume": None,
Expand Down
6 changes: 3 additions & 3 deletions sdk/python/test/test_frontend_api/test_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,18 +101,18 @@ def test_invalid_name_dataset(get_auth):
# create dataset
# with pytest.raises(Exception) as e:
res = create_dataset(get_auth, 0)
assert res['code'] == 102
assert res['code'] == 100

res = create_dataset(get_auth, "")
assert res['code'] == 102
assert res['code'] == 100

long_string = ""

while len(long_string.encode("utf-8")) <= DATASET_NAME_LIMIT:
long_string += random.choice(string.ascii_letters + string.digits)

res = create_dataset(get_auth, long_string)
assert res['code'] == 102
assert res['code'] == 100
print(res)


Expand Down
29 changes: 29 additions & 0 deletions test/testcases/configs.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,32 @@
DOCUMENT_NAME_LIMIT = 255
CHAT_ASSISTANT_NAME_LIMIT = 255
SESSION_WITH_CHAT_NAME_LIMIT = 255

# RAPTOR summarization defaults nested inside the parser configuration.
_RAPTOR_DEFAULTS = {
    "use_raptor": True,
    "prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
    "max_token": 256,
    "threshold": 0.1,
    "max_cluster": 64,
    "random_seed": 0,
}

# GraphRAG extraction defaults nested inside the parser configuration.
_GRAPHRAG_DEFAULTS = {
    "use_graphrag": True,
    "entity_types": ["organization", "person", "geo", "event", "category"],
    "method": "light",
}

# Parser configuration the server is expected to apply when a dataset is
# created without an explicit parser_config (the "naive" method defaults).
DEFAULT_PARSER_CONFIG = {
    "layout_recognize": "DeepDOC",
    "chunk_token_num": 512,
    "delimiter": "\n",
    "auto_keywords": 0,
    "auto_questions": 0,
    "html4excel": False,
    "topn_tags": 3,
    "raptor": _RAPTOR_DEFAULTS,
    "graphrag": _GRAPHRAG_DEFAULTS,
}
Loading