feat: upgrade langchain (#430)
Co-authored-by: jyong <718720800@qq.com>
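The change repeated across every task below swaps llama_index's `Node` (with its explicit SOURCE/PREVIOUS/NEXT relationships) for langchain's `Document`, and replaces direct `VectorIndex`/`KeywordTableIndex` construction with `IndexBuilder.get_index`. A minimal sketch of the conversion, assuming `segment` is a dify `DocumentSegment` with the fields used in the hunks below:

from langchain.schema import Document


def segment_to_document(segment) -> Document:
    # One DocumentSegment becomes one langchain Document; the former
    # llama_index node ids are carried along in metadata.
    return Document(
        page_content=segment.content,
        metadata={
            "doc_id": segment.index_node_id,
            "doc_hash": segment.index_node_hash,
            "document_id": segment.document_id,
            "dataset_id": segment.dataset_id,
        }
    )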
@@ -4,96 +4,81 @@ import time
 import click
 from celery import shared_task
-from llama_index.data_structs import Node
-from llama_index.data_structs.node_v2 import DocumentRelationship
+from langchain.schema import Document
 from werkzeug.exceptions import NotFound
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from extensions.ext_redis import redis_client
-from models.dataset import DocumentSegment, Document
+from models.dataset import DocumentSegment
+from models.dataset import Document as DatasetDocument
 
 
 @shared_task
-def add_document_to_index_task(document_id: str):
+def add_document_to_index_task(dataset_document_id: str):
     """
     Async Add document to index
     :param document_id:
 
     Usage: add_document_to_index.delay(document_id)
     """
-    logging.info(click.style('Start add document to index: {}'.format(document_id), fg='green'))
+    logging.info(click.style('Start add document to index: {}'.format(dataset_document_id), fg='green'))
     start_at = time.perf_counter()
 
-    document = db.session.query(Document).filter(Document.id == document_id).first()
-    if not document:
+    dataset_document = db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document_id).first()
+    if not dataset_document:
         raise NotFound('Document not found')
 
-    if document.indexing_status != 'completed':
+    if dataset_document.indexing_status != 'completed':
         return
 
-    indexing_cache_key = 'document_{}_indexing'.format(document.id)
+    indexing_cache_key = 'document_{}_indexing'.format(dataset_document.id)
 
     try:
         segments = db.session.query(DocumentSegment).filter(
-            DocumentSegment.document_id == document.id,
+            DocumentSegment.document_id == dataset_document.id,
             DocumentSegment.enabled == True
         ) \
             .order_by(DocumentSegment.position.asc()).all()
 
-        nodes = []
-        previous_node = None
+        documents = []
         for segment in segments:
-            relationships = {
-                DocumentRelationship.SOURCE: document.id
-            }
-
-            if previous_node:
-                relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id
-
-                previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id
-
-            node = Node(
-                doc_id=segment.index_node_id,
-                doc_hash=segment.index_node_hash,
-                text=segment.content,
-                extra_info=None,
-                node_info=None,
-                relationships=relationships
+            document = Document(
+                page_content=segment.content,
+                metadata={
+                    "doc_id": segment.index_node_id,
+                    "doc_hash": segment.index_node_hash,
+                    "document_id": segment.document_id,
+                    "dataset_id": segment.dataset_id,
+                }
             )
 
-            previous_node = node
-
-            nodes.append(node)
+            documents.append(document)
 
-        dataset = document.dataset
+        dataset = dataset_document.dataset
 
         if not dataset:
             raise Exception('Document has no dataset')
 
-        vector_index = VectorIndex(dataset=dataset)
-        keyword_table_index = KeywordTableIndex(dataset=dataset)
-
         # save vector index
-        if dataset.indexing_technique == "high_quality":
-            vector_index.add_nodes(
-                nodes=nodes,
-                duplicate_check=True
-            )
+        index = IndexBuilder.get_index(dataset, 'high_quality')
+        if index:
+            index.add_texts(documents)
 
         # save keyword index
-        keyword_table_index.add_nodes(nodes)
+        index = IndexBuilder.get_index(dataset, 'economy')
+        if index:
+            index.add_texts(documents)
 
         end_at = time.perf_counter()
         logging.info(
-            click.style('Document added to index: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
+            click.style('Document added to index: {} latency: {}'.format(dataset_document.id, end_at - start_at), fg='green'))
     except Exception as e:
         logging.exception("add document to index failed")
-        document.enabled = False
-        document.disabled_at = datetime.datetime.utcnow()
-        document.status = 'error'
-        document.error = str(e)
+        dataset_document.enabled = False
+        dataset_document.disabled_at = datetime.datetime.utcnow()
+        dataset_document.status = 'error'
+        dataset_document.error = str(e)
         db.session.commit()
     finally:
         redis_client.delete(indexing_cache_key)
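`IndexBuilder.get_index` is dify-internal and its implementation is not part of this diff. The following is a hypothetical sketch inferred only from the call sites: 'high_quality' yields a vector index or None (hence the `if index:` guards), 'economy' yields the keyword-table index, and `ignore_high_quality_check=True` (used later in `deal_dataset_vector_index_task`) forces the vector index regardless of the dataset's indexing technique.

# Hypothetical reconstruction, NOT the actual dify implementation.
class IndexBuilder:
    @classmethod
    def get_index(cls, dataset, indexing_technique: str,
                  ignore_high_quality_check: bool = False):
        if indexing_technique == 'high_quality':
            # Economy datasets have no vector index unless explicitly forced.
            if not ignore_high_quality_check and dataset.indexing_technique != 'high_quality':
                return None
            return cls._get_vector_index(dataset)  # assumed helper
        if indexing_technique == 'economy':
            return cls._get_keyword_table_index(dataset)  # assumed helper
        return None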
@@ -4,12 +4,10 @@ import time
 import click
 from celery import shared_task
-from llama_index.data_structs import Node
-from llama_index.data_structs.node_v2 import DocumentRelationship
+from langchain.schema import Document
 from werkzeug.exceptions import NotFound
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from extensions.ext_redis import redis_client
 from models.dataset import DocumentSegment
@@ -36,44 +34,41 @@ def add_segment_to_index_task(segment_id: str):
     indexing_cache_key = 'segment_{}_indexing'.format(segment.id)
 
     try:
-        relationships = {
-            DocumentRelationship.SOURCE: segment.document_id,
-        }
-
-        previous_segment = segment.previous_segment
-        if previous_segment:
-            relationships[DocumentRelationship.PREVIOUS] = previous_segment.index_node_id
-
-        next_segment = segment.next_segment
-        if next_segment:
-            relationships[DocumentRelationship.NEXT] = next_segment.index_node_id
-
-        node = Node(
-            doc_id=segment.index_node_id,
-            doc_hash=segment.index_node_hash,
-            text=segment.content,
-            extra_info=None,
-            node_info=None,
-            relationships=relationships
+        document = Document(
+            page_content=segment.content,
+            metadata={
+                "doc_id": segment.index_node_id,
+                "doc_hash": segment.index_node_hash,
+                "document_id": segment.document_id,
+                "dataset_id": segment.dataset_id,
+            }
         )
 
         dataset = segment.dataset
 
         if not dataset:
-            raise Exception('Segment has no dataset')
+            logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
+            return
 
-        vector_index = VectorIndex(dataset=dataset)
-        keyword_table_index = KeywordTableIndex(dataset=dataset)
+        dataset_document = segment.document
+
+        if not dataset_document:
+            logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
+            return
+
+        if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
+            logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
+            return
 
         # save vector index
-        if dataset.indexing_technique == "high_quality":
-            vector_index.add_nodes(
-                nodes=[node],
-                duplicate_check=True
-            )
+        index = IndexBuilder.get_index(dataset, 'high_quality')
+        if index:
+            index.add_texts([document], duplicate_check=True)
 
         # save keyword index
-        keyword_table_index.add_nodes([node])
+        index = IndexBuilder.get_index(dataset, 'economy')
+        if index:
+            index.add_texts([document])
 
         end_at = time.perf_counter()
         logging.info(click.style('Segment added to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
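The segment task now skips quietly instead of raising when its parent objects are missing or unfinished. Those three early-return guards condense to a predicate like this (a sketch; the `segment.document` / `segment.dataset` relationships are as used in the hunk above):

def segment_is_indexable(segment) -> bool:
    # Mirrors the guards in add_segment_to_index_task.
    doc = segment.document
    return (
        segment.dataset is not None
        and doc is not None
        and doc.enabled
        and not doc.archived
        and doc.indexing_status == 'completed'
    )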
@@ -4,8 +4,7 @@ import time
 import click
 from celery import shared_task
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from models.dataset import DocumentSegment, Dataset, DatasetKeywordTable, DatasetQuery, DatasetProcessRule, \
     AppDatasetJoin
@@ -33,29 +32,24 @@ def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str,
         index_struct=index_struct
     )
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
-
-    documents = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
-    index_doc_ids = [document.id for document in documents]
+    segments = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
+    index_node_ids = [segment.index_node_id for segment in segments]
+
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
 
     # delete from vector index
-    if dataset.indexing_technique == "high_quality":
-        for index_doc_id in index_doc_ids:
-            try:
-                vector_index.del_doc(index_doc_id)
-            except Exception:
-                logging.exception("Delete doc index failed when dataset deleted.")
-                continue
+    if vector_index:
+        try:
+            vector_index.delete()
+        except Exception:
+            logging.exception("Delete doc index failed when dataset deleted.")
 
     # delete from keyword index
-    if index_node_ids:
-        try:
-            keyword_table_index.del_nodes(index_node_ids)
-        except Exception:
-            logging.exception("Delete nodes index failed when dataset deleted.")
+    try:
+        kw_index.delete()
+    except Exception:
+        logging.exception("Delete nodes index failed when dataset deleted.")
 
-    for document in documents:
-        db.session.delete(document)
@@ -63,7 +57,6 @@ def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str,
     for segment in segments:
         db.session.delete(segment)
 
-    db.session.query(DatasetKeywordTable).filter(DatasetKeywordTable.dataset_id == dataset_id).delete()
     db.session.query(DatasetProcessRule).filter(DatasetProcessRule.dataset_id == dataset_id).delete()
     db.session.query(DatasetQuery).filter(DatasetQuery.dataset_id == dataset_id).delete()
     db.session.query(AppDatasetJoin).filter(AppDatasetJoin.dataset_id == dataset_id).delete()
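Dataset cleanup now drops each index wholesale rather than deleting document-by-document, one round trip instead of N. A condensed sketch of the pattern (the diff calls `kw_index.delete()` unguarded, presumably because the keyword index always exists; guarding both here is my assumption):

for index in (vector_index, kw_index):
    if index:
        try:
            index.delete()  # drop the whole index for this dataset
        except Exception:
            logging.exception("Delete index failed when dataset deleted.")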
@@ -4,8 +4,7 @@ import time
 import click
 from celery import shared_task
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from models.dataset import DocumentSegment, Dataset
 
@@ -28,21 +27,23 @@ def clean_document_task(document_id: str, dataset_id: str):
     if not dataset:
         raise Exception('Document has no dataset')
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
 
     segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
    index_node_ids = [segment.index_node_id for segment in segments]
 
     # delete from vector index
-    vector_index.del_nodes(index_node_ids)
+    if vector_index:
+        vector_index.delete_by_document_id(document_id)
 
     # delete from keyword index
     if index_node_ids:
-        keyword_table_index.del_nodes(index_node_ids)
+        kw_index.delete_by_ids(index_node_ids)
 
     for segment in segments:
         db.session.delete(segment)
 
     db.session.commit()
     end_at = time.perf_counter()
     logging.info(
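The new index interface exposes two deletion granularities, both visible in the hunk above: per-document for the vector store and per-node for the keyword table. The method names are exactly as called in the diff; anything beyond these signatures is assumed:

# Per-document: remove every vector belonging to one dataset document.
if vector_index:
    vector_index.delete_by_document_id(document_id)

# Per-node: remove specific segments by their index node ids.
if index_node_ids:
    kw_index.delete_by_ids(index_node_ids)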
@@ -5,8 +5,7 @@ from typing import List
 import click
 from celery import shared_task
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from models.dataset import DocumentSegment, Dataset, Document
 
@@ -29,22 +28,24 @@ def clean_notion_document_task(document_ids: List[str], dataset_id: str):
     if not dataset:
         raise Exception('Document has no dataset')
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
     for document_id in document_ids:
         document = db.session.query(Document).filter(
             Document.id == document_id
         ).first()
         db.session.delete(document)
 
         segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
         index_node_ids = [segment.index_node_id for segment in segments]
 
         # delete from vector index
-        vector_index.del_nodes(index_node_ids)
+        if vector_index:
+            vector_index.delete_by_document_id(document_id)
 
         # delete from keyword index
         if index_node_ids:
-            keyword_table_index.del_nodes(index_node_ids)
+            kw_index.delete_by_ids(index_node_ids)
 
         for segment in segments:
             db.session.delete(segment)
@@ -3,10 +3,12 @@ import time
 import click
 from celery import shared_task
-from llama_index.data_structs.node_v2 import DocumentRelationship, Node
-from core.index.vector_index import VectorIndex
+from langchain.schema import Document
 
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
-from models.dataset import DocumentSegment, Document, Dataset
+from models.dataset import DocumentSegment, Dataset
+from models.dataset import Document as DatasetDocument
 
 
 @shared_task
@@ -24,49 +26,47 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
     dataset = Dataset.query.filter_by(
         id=dataset_id
     ).first()
 
     if not dataset:
         raise Exception('Dataset not found')
-    documents = Document.query.filter_by(dataset_id=dataset_id).all()
-    if documents:
-        vector_index = VectorIndex(dataset=dataset)
-        for document in documents:
-            # delete from vector index
-            if action == "remove":
-                vector_index.del_doc(document.id)
-            elif action == "add":
+
+    if action == "remove":
+        index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=True)
+        index.delete()
+    elif action == "add":
+        dataset_documents = db.session.query(DatasetDocument).filter(
+            DatasetDocument.dataset_id == dataset_id,
+            DatasetDocument.indexing_status == 'completed',
+            DatasetDocument.enabled == True,
+            DatasetDocument.archived == False,
+        ).all()
+
+        if dataset_documents:
+            # save vector index
+            index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=True)
+            for dataset_document in dataset_documents:
                 segments = db.session.query(DocumentSegment).filter(
-                    DocumentSegment.document_id == document.id,
+                    DocumentSegment.document_id == dataset_document.id,
                     DocumentSegment.enabled == True
                 ).order_by(DocumentSegment.position.asc()).all()
 
-                nodes = []
-                previous_node = None
+                documents = []
                 for segment in segments:
-                    relationships = {
-                        DocumentRelationship.SOURCE: document.id
-                    }
-
-                    if previous_node:
-                        relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id
-
-                        previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id
-
-                    node = Node(
-                        doc_id=segment.index_node_id,
-                        doc_hash=segment.index_node_hash,
-                        text=segment.content,
-                        extra_info=None,
-                        node_info=None,
-                        relationships=relationships
+                    document = Document(
+                        page_content=segment.content,
+                        metadata={
+                            "doc_id": segment.index_node_id,
+                            "doc_hash": segment.index_node_hash,
+                            "document_id": segment.document_id,
+                            "dataset_id": segment.dataset_id,
+                        }
                     )
 
-                    previous_node = node
-                    nodes.append(node)
+                    documents.append(document)
 
-                # save vector index
-                vector_index.add_nodes(
-                    nodes=nodes,
-                    duplicate_check=True
-                )
+                index.add_texts(documents)
 
     end_at = time.perf_counter()
     logging.info(
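`deal_dataset_vector_index_task` becomes the switch for toggling a dataset's vector index as a whole: 'remove' drops the index outright, 'add' rebuilds it from every completed, enabled, non-archived document. A hedged usage sketch, following the `(dataset_id, action)` signature above:

# e.g. when a workspace downgrades or re-enables high-quality indexing:
deal_dataset_vector_index_task.delay(dataset_id, 'remove')  # drop the vector index
deal_dataset_vector_index_task.delay(dataset_id, 'add')     # rebuild from eligible documents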
@@ -6,11 +6,9 @@ import click
 from celery import shared_task
 from werkzeug.exceptions import NotFound
 
-from core.data_source.notion import NotionPageReader
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.data_loader.loader.notion import NotionLoader
+from core.index.index import IndexBuilder
 from core.indexing_runner import IndexingRunner, DocumentIsPausedException
-from core.llm.error import ProviderTokenNotInitError
 from extensions.ext_database import db
 from models.dataset import Document, Dataset, DocumentSegment
 from models.source import DataSourceBinding
@@ -43,6 +41,7 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
         raise ValueError("no notion page found")
 
+    workspace_id = data_source_info['notion_workspace_id']
     page_id = data_source_info['notion_page_id']
     page_type = data_source_info['type']
     page_edited_time = data_source_info['last_edited_time']
     data_source_binding = DataSourceBinding.query.filter(
         db.and_(
@@ -54,8 +53,16 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
     ).first()
     if not data_source_binding:
         raise ValueError('Data source binding not found.')
-    reader = NotionPageReader(integration_token=data_source_binding.access_token)
-    last_edited_time = reader.get_page_last_edited_time(page_id)
+
+    loader = NotionLoader(
+        notion_access_token=data_source_binding.access_token,
+        notion_workspace_id=workspace_id,
+        notion_obj_id=page_id,
+        notion_page_type=page_type
+    )
+
+    last_edited_time = loader.get_notion_last_edited_time()
 
     # check the page is updated
     if last_edited_time != page_edited_time:
         document.indexing_status = 'parsing'
@@ -68,18 +75,19 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
         if not dataset:
             raise Exception('Dataset not found')
 
-        vector_index = VectorIndex(dataset=dataset)
-        keyword_table_index = KeywordTableIndex(dataset=dataset)
+        vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+        kw_index = IndexBuilder.get_index(dataset, 'economy')
 
         segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
         index_node_ids = [segment.index_node_id for segment in segments]
 
         # delete from vector index
-        vector_index.del_nodes(index_node_ids)
+        if vector_index:
+            vector_index.delete_by_document_id(document_id)
 
         # delete from keyword index
         if index_node_ids:
-            keyword_table_index.del_nodes(index_node_ids)
+            kw_index.delete_by_ids(index_node_ids)
 
         for segment in segments:
             db.session.delete(segment)
@@ -89,21 +97,13 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
                 click.style('Cleaned document when document update data source or process rule: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
         except Exception:
             logging.exception("Cleaned document when document update data source or process rule failed")
 
         try:
             indexing_runner = IndexingRunner()
             indexing_runner.run([document])
             end_at = time.perf_counter()
             logging.info(click.style('update document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
-        except DocumentIsPausedException:
-            logging.info(click.style('Document update paused, document id: {}'.format(document.id), fg='yellow'))
-        except ProviderTokenNotInitError as e:
-            document.indexing_status = 'error'
-            document.error = str(e.description)
-            document.stopped_at = datetime.datetime.utcnow()
-            db.session.commit()
-        except Exception as e:
-            logging.exception("consume update document failed")
-            document.indexing_status = 'error'
-            document.error = str(e)
-            document.stopped_at = datetime.datetime.utcnow()
-            db.session.commit()
+        except DocumentIsPausedException as ex:
+            logging.info(click.style(str(ex), fg='yellow'))
+        except Exception:
+            pass
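The Notion sync now goes through `NotionLoader` instead of `NotionPageReader`, which needs the workspace id (hence the added `workspace_id` line) and the page type. The staleness check, condensed from the hunks above:

loader = NotionLoader(
    notion_access_token=data_source_binding.access_token,
    notion_workspace_id=workspace_id,
    notion_obj_id=page_id,
    notion_page_type=page_type
)

if loader.get_notion_last_edited_time() != page_edited_time:
    # Page changed upstream: clean the old segments, then re-index
    # via IndexingRunner().run([document]) as in the task body.
    ...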
@@ -7,7 +7,6 @@ from celery import shared_task
 from werkzeug.exceptions import NotFound
 
 from core.indexing_runner import IndexingRunner, DocumentIsPausedException
-from core.llm.error import ProviderTokenNotInitError
 from extensions.ext_database import db
 from models.dataset import Document
 
@@ -22,9 +21,9 @@ def document_indexing_task(dataset_id: str, document_ids: list):
     Usage: document_indexing_task.delay(dataset_id, document_id)
     """
+    documents = []
+    start_at = time.perf_counter()
     for document_id in document_ids:
         logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
-        start_at = time.perf_counter()
 
         document = db.session.query(Document).filter(
             Document.id == document_id,
@@ -44,17 +43,8 @@ def document_indexing_task(dataset_id: str, document_ids: list):
         indexing_runner = IndexingRunner()
         indexing_runner.run(documents)
         end_at = time.perf_counter()
-        logging.info(click.style('Processed document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
-    except DocumentIsPausedException:
-        logging.info(click.style('Document paused, document id: {}'.format(document.id), fg='yellow'))
-    except ProviderTokenNotInitError as e:
-        document.indexing_status = 'error'
-        document.error = str(e.description)
-        document.stopped_at = datetime.datetime.utcnow()
-        db.session.commit()
-    except Exception as e:
-        logging.exception("consume document failed")
-        document.indexing_status = 'error'
-        document.error = str(e)
-        document.stopped_at = datetime.datetime.utcnow()
-        db.session.commit()
+        logging.info(click.style('Processed dataset: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
    except DocumentIsPausedException as ex:
        logging.info(click.style(str(ex), fg='yellow'))
    except Exception:
        pass
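`document_indexing_task` now times and runs the whole batch once (a single `start_at` before the loop and one `indexing_runner.run(documents)` call) instead of logging per document, and the verbose error-to-status handlers give way to a quiet `except Exception: pass`. Usage follows the docstring's pattern:

# One task invocation indexes several documents of a dataset together:
document_indexing_task.delay(dataset_id, document_ids)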
@@ -6,10 +6,8 @@ import click
 from celery import shared_task
 from werkzeug.exceptions import NotFound
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from core.indexing_runner import IndexingRunner, DocumentIsPausedException
-from core.llm.error import ProviderTokenNotInitError
 from extensions.ext_database import db
 from models.dataset import Document, Dataset, DocumentSegment
 
@@ -44,18 +42,19 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
     if not dataset:
         raise Exception('Dataset not found')
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
 
     segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
     index_node_ids = [segment.index_node_id for segment in segments]
 
     # delete from vector index
-    vector_index.del_nodes(index_node_ids)
+    if vector_index:
+        vector_index.delete_by_ids(index_node_ids)
 
     # delete from keyword index
     if index_node_ids:
-        keyword_table_index.del_nodes(index_node_ids)
+        kw_index.delete_by_ids(index_node_ids)
 
     for segment in segments:
         db.session.delete(segment)
@@ -65,21 +64,13 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
             click.style('Cleaned document when document update data source or process rule: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
     except Exception:
         logging.exception("Cleaned document when document update data source or process rule failed")
 
     try:
         indexing_runner = IndexingRunner()
         indexing_runner.run([document])
         end_at = time.perf_counter()
         logging.info(click.style('update document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
-    except DocumentIsPausedException:
-        logging.info(click.style('Document update paused, document id: {}'.format(document.id), fg='yellow'))
-    except ProviderTokenNotInitError as e:
-        document.indexing_status = 'error'
-        document.error = str(e.description)
-        document.stopped_at = datetime.datetime.utcnow()
-        db.session.commit()
-    except Exception as e:
-        logging.exception("consume update document failed")
-        document.indexing_status = 'error'
-        document.error = str(e)
-        document.stopped_at = datetime.datetime.utcnow()
-        db.session.commit()
+    except DocumentIsPausedException as ex:
+        logging.info(click.style(str(ex), fg='yellow'))
+    except Exception:
+        pass
@@ -1,4 +1,3 @@
-import datetime
 import logging
 import time
 
@@ -41,11 +40,7 @@ def recover_document_indexing_task(dataset_id: str, document_id: str):
         indexing_runner.run_in_indexing_status(document)
         end_at = time.perf_counter()
         logging.info(click.style('Processed document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
-    except DocumentIsPausedException:
-        logging.info(click.style('Document paused, document id: {}'.format(document.id), fg='yellow'))
-    except Exception as e:
-        logging.exception("consume document failed")
-        document.indexing_status = 'error'
-        document.error = str(e)
-        document.stopped_at = datetime.datetime.utcnow()
-        db.session.commit()
+    except DocumentIsPausedException as ex:
+        logging.info(click.style(str(ex), fg='yellow'))
+    except Exception:
+        pass
@@ -5,8 +5,7 @@ import click
 from celery import shared_task
 from werkzeug.exceptions import NotFound
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from extensions.ext_redis import redis_client
 from models.dataset import DocumentSegment, Document
@@ -38,17 +37,17 @@ def remove_document_from_index_task(document_id: str):
     if not dataset:
         raise Exception('Document has no dataset')
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
 
     # delete from vector index
-    vector_index.del_doc(document.id)
+    vector_index.delete_by_document_id(document.id)
 
     # delete from keyword index
     segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document.id).all()
     index_node_ids = [segment.index_node_id for segment in segments]
     if index_node_ids:
-        keyword_table_index.del_nodes(index_node_ids)
+        kw_index.delete_by_ids(index_node_ids)
 
     end_at = time.perf_counter()
     logging.info(
@@ -5,8 +5,7 @@ import click
 from celery import shared_task
 from werkzeug.exceptions import NotFound
 
-from core.index.keyword_table_index import KeywordTableIndex
-from core.index.vector_index import VectorIndex
+from core.index.index import IndexBuilder
 from extensions.ext_database import db
 from extensions.ext_redis import redis_client
 from models.dataset import DocumentSegment
@@ -36,17 +35,28 @@ def remove_segment_from_index_task(segment_id: str):
     dataset = segment.dataset
 
     if not dataset:
-        raise Exception('Segment has no dataset')
+        logging.info(click.style('Segment {} has no dataset, pass.'.format(segment.id), fg='cyan'))
+        return
 
-    vector_index = VectorIndex(dataset=dataset)
-    keyword_table_index = KeywordTableIndex(dataset=dataset)
+    dataset_document = segment.document
+
+    if not dataset_document:
+        logging.info(click.style('Segment {} has no document, pass.'.format(segment.id), fg='cyan'))
+        return
+
+    if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
+        logging.info(click.style('Segment {} document status is invalid, pass.'.format(segment.id), fg='cyan'))
+        return
+
+    vector_index = IndexBuilder.get_index(dataset, 'high_quality')
+    kw_index = IndexBuilder.get_index(dataset, 'economy')
 
     # delete from vector index
-    if dataset.indexing_technique == "high_quality":
-        vector_index.del_nodes([segment.index_node_id])
+    if vector_index:
+        vector_index.delete_by_ids([segment.index_node_id])
 
     # delete from keyword index
-    keyword_table_index.del_nodes([segment.index_node_id])
+    kw_index.delete_by_ids([segment.index_node_id])
 
     end_at = time.perf_counter()
     logging.info(click.style('Segment removed from index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))