Mirror of https://github.com/langgenius/dify.git
Initial commit
api/tasks/add_document_to_index_task.py (new file, 99 lines)
@@ -0,0 +1,99 @@
import datetime
import logging
import time

import click
from celery import shared_task
from llama_index.data_structs import Node
from llama_index.data_structs.node_v2 import DocumentRelationship
from werkzeug.exceptions import NotFound

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment, Document


@shared_task
def add_document_to_index_task(document_id: str):
    """
    Async Add document to index
    :param document_id:

    Usage: add_document_to_index.delay(document_id)
    """
    logging.info(click.style('Start add document to index: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(Document.id == document_id).first()
    if not document:
        raise NotFound('Document not found')

    if document.indexing_status != 'completed':
        return

    indexing_cache_key = 'document_{}_indexing'.format(document.id)

    try:
        segments = db.session.query(DocumentSegment).filter(
            DocumentSegment.document_id == document.id,
            DocumentSegment.enabled == True
        ) \
            .order_by(DocumentSegment.position.asc()).all()

        nodes = []
        previous_node = None
        for segment in segments:
            relationships = {
                DocumentRelationship.SOURCE: document.id
            }

            if previous_node:
                relationships[DocumentRelationship.PREVIOUS] = previous_node.doc_id

                previous_node.relationships[DocumentRelationship.NEXT] = segment.index_node_id

            node = Node(
                doc_id=segment.index_node_id,
                doc_hash=segment.index_node_hash,
                text=segment.content,
                extra_info=None,
                node_info=None,
                relationships=relationships
            )

            previous_node = node

            nodes.append(node)

        dataset = document.dataset

        if not dataset:
            raise Exception('Document has no dataset')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        # save vector index
        if dataset.indexing_technique == "high_quality":
            vector_index.add_nodes(
                nodes=nodes,
                duplicate_check=True
            )

        # save keyword index
        keyword_table_index.add_nodes(nodes)

        end_at = time.perf_counter()
        logging.info(
            click.style('Document added to index: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("add document to index failed")
        document.enabled = False
        document.disabled_at = datetime.datetime.utcnow()
        document.status = 'error'
        document.error = str(e)
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
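
A minimal caller-side sketch of how this task might be dispatched. The task name, the .delay(document_id) usage, and the 'document_{id}_indexing' key it deletes in its finally block come from the code above; the enable_document helper, the use of setex, and the TTL are illustrative assumptions, not part of the commit:

    from extensions.ext_database import db
    from extensions.ext_redis import redis_client
    from tasks.add_document_to_index_task import add_document_to_index_task

    def enable_document(document):
        # hypothetical caller: re-enable the document, mark "indexing in progress"
        # in Redis (the task clears this key when it finishes), then dispatch async
        document.enabled = True
        db.session.commit()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        redis_client.setex(indexing_cache_key, 600, 1)  # assumed 10-minute guard TTL

        add_document_to_index_task.delay(document.id)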
api/tasks/add_segment_to_index_task.py (new file, 88 lines)
@@ -0,0 +1,88 @@
import datetime
import logging
import time

import click
from celery import shared_task
from llama_index.data_structs import Node
from llama_index.data_structs.node_v2 import DocumentRelationship
from werkzeug.exceptions import NotFound

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment


@shared_task
def add_segment_to_index_task(segment_id: str):
    """
    Async Add segment to index
    :param segment_id:

    Usage: add_segment_to_index.delay(segment_id)
    """
    logging.info(click.style('Start add segment to index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()

    segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
    if not segment:
        raise NotFound('Segment not found')

    if segment.status != 'completed':
        return

    indexing_cache_key = 'segment_{}_indexing'.format(segment.id)

    try:
        relationships = {
            DocumentRelationship.SOURCE: segment.document_id,
        }

        previous_segment = segment.previous_segment
        if previous_segment:
            relationships[DocumentRelationship.PREVIOUS] = previous_segment.index_node_id

        next_segment = segment.next_segment
        if next_segment:
            relationships[DocumentRelationship.NEXT] = next_segment.index_node_id

        node = Node(
            doc_id=segment.index_node_id,
            doc_hash=segment.index_node_hash,
            text=segment.content,
            extra_info=None,
            node_info=None,
            relationships=relationships
        )

        dataset = segment.dataset

        if not dataset:
            raise Exception('Segment has no dataset')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        # save vector index
        if dataset.indexing_technique == "high_quality":
            vector_index.add_nodes(
                nodes=[node],
                duplicate_check=True
            )

        # save keyword index
        keyword_table_index.add_nodes([node])

        end_at = time.perf_counter()
        logging.info(click.style('Segment added to index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
    except Exception as e:
        logging.exception("add segment to index failed")
        segment.enabled = False
        segment.disabled_at = datetime.datetime.utcnow()
        segment.status = 'error'
        segment.error = str(e)
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
api/tasks/clean_dataset_task.py (new file, 77 lines)
@@ -0,0 +1,77 @@
import logging
import time

import click
from celery import shared_task

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from models.dataset import DocumentSegment, Dataset, DatasetKeywordTable, DatasetQuery, DatasetProcessRule, \
    AppDatasetJoin, Document


@shared_task
def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str, index_struct: str):
    """
    Clean dataset when dataset deleted.
    :param dataset_id: dataset id
    :param tenant_id: tenant id
    :param indexing_technique: indexing technique
    :param index_struct: index struct dict

    Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct)
    """
    logging.info(click.style('Start clean dataset when dataset deleted: {}'.format(dataset_id), fg='green'))
    start_at = time.perf_counter()

    try:
        dataset = Dataset(
            id=dataset_id,
            tenant_id=tenant_id,
            indexing_technique=indexing_technique,
            index_struct=index_struct
        )

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        # collect document ids for per-document vector deletion and
        # segment node ids for keyword-table deletion
        documents = db.session.query(Document).filter(Document.dataset_id == dataset_id).all()
        index_doc_ids = [document.id for document in documents]
        segments = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
        index_node_ids = [segment.index_node_id for segment in segments]

        # delete from vector index
        if dataset.indexing_technique == "high_quality":
            for index_doc_id in index_doc_ids:
                try:
                    vector_index.del_doc(index_doc_id)
                except Exception:
                    logging.exception("Delete doc index failed when dataset deleted.")
                    continue

        # delete from keyword index
        if index_node_ids:
            try:
                keyword_table_index.del_nodes(index_node_ids)
            except Exception:
                logging.exception("Delete nodes index failed when dataset deleted.")

        for document in documents:
            db.session.delete(document)

        for segment in segments:
            db.session.delete(segment)

        db.session.query(DatasetKeywordTable).filter(DatasetKeywordTable.dataset_id == dataset_id).delete()
        db.session.query(DatasetProcessRule).filter(DatasetProcessRule.dataset_id == dataset_id).delete()
        db.session.query(DatasetQuery).filter(DatasetQuery.dataset_id == dataset_id).delete()
        db.session.query(AppDatasetJoin).filter(AppDatasetJoin.dataset_id == dataset_id).delete()

        db.session.commit()

        end_at = time.perf_counter()
        logging.info(
            click.style('Cleaned dataset when dataset deleted: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("Cleaned dataset when dataset deleted failed")
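
The task rebuilds a transient Dataset from the scalar arguments because, by the time the worker runs, the dataset row has typically already been deleted. A sketch of the assumed caller-side ordering (the delete_dataset helper and the exact flow are illustrative, not from the commit):

    from extensions.ext_database import db
    from models.dataset import Dataset
    from tasks.clean_dataset_task import clean_dataset_task

    def delete_dataset(dataset: Dataset):
        # capture the fields the cleanup task needs before the row disappears
        dataset_id = dataset.id
        tenant_id = dataset.tenant_id
        indexing_technique = dataset.indexing_technique
        index_struct = dataset.index_struct

        db.session.delete(dataset)
        db.session.commit()

        # async cleanup of indexes and related rows using the captured values
        clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct)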
api/tasks/clean_document_task.py (new file, 52 lines)
@@ -0,0 +1,52 @@
import logging
import time

import click
from celery import shared_task

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from models.dataset import DocumentSegment, Dataset


@shared_task
def clean_document_task(document_id: str, dataset_id: str):
    """
    Clean document when document deleted.
    :param document_id: document id
    :param dataset_id: dataset id

    Usage: clean_document_task.delay(document_id, dataset_id)
    """
    logging.info(click.style('Start clean document when document deleted: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    try:
        dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()

        if not dataset:
            raise Exception('Document has no dataset')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
        index_node_ids = [segment.index_node_id for segment in segments]

        # delete from vector index
        if dataset.indexing_technique == "high_quality":
            vector_index.del_nodes(index_node_ids)

        # delete from keyword index
        if index_node_ids:
            keyword_table_index.del_nodes(index_node_ids)

        for segment in segments:
            db.session.delete(segment)

        end_at = time.perf_counter()
        logging.info(
            click.style('Cleaned document when document deleted: {} latency: {}'.format(document_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("Cleaned document when document deleted failed")
api/tasks/document_indexing_task.py (new file, 56 lines)
@@ -0,0 +1,56 @@
import datetime
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.indexing_runner import IndexingRunner, DocumentIsPausedException
from core.llm.error import ProviderTokenNotInitError
from extensions.ext_database import db
from models.dataset import Document


@shared_task
def document_indexing_task(dataset_id: str, document_id: str):
    """
    Async process document
    :param dataset_id:
    :param document_id:

    Usage: document_indexing_task.delay(dataset_id, document_id)
    """
    logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(
        Document.id == document_id,
        Document.dataset_id == dataset_id
    ).first()

    if not document:
        raise NotFound('Document not found')

    document.indexing_status = 'parsing'
    document.processing_started_at = datetime.datetime.utcnow()
    db.session.commit()

    try:
        indexing_runner = IndexingRunner()
        indexing_runner.run(document)
        end_at = time.perf_counter()
        logging.info(click.style('Processed document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
    except DocumentIsPausedException:
        logging.info(click.style('Document paused, document id: {}'.format(document.id), fg='yellow'))
    except ProviderTokenNotInitError as e:
        document.indexing_status = 'error'
        document.error = str(e.description)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.commit()
    except Exception as e:
        logging.exception("consume document failed")
        document.indexing_status = 'error'
        document.error = str(e)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.commit()
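
The task looks the Document up by id and dataset id and raises NotFound otherwise, so the caller is assumed to persist the row before dispatching. A sketch under that assumption (the helper name and constructor handling are illustrative; the 'waiting' status matches the states recover_document_indexing_task further down resumes from):

    from extensions.ext_database import db
    from models.dataset import Document
    from tasks.document_indexing_task import document_indexing_task

    def create_and_index_document(dataset_id: str, document: Document):
        # persist the document in a pre-processing state, then hand it to the async pipeline
        document.dataset_id = dataset_id
        document.indexing_status = 'waiting'
        db.session.add(document)
        db.session.commit()

        document_indexing_task.delay(dataset_id, document.id)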
api/tasks/generate_conversation_summary_task.py (new file, 46 lines)
@@ -0,0 +1,46 @@
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.generator.llm_generator import LLMGenerator
from extensions.ext_database import db
from models.model import Conversation, Message


@shared_task
def generate_conversation_summary_task(conversation_id: str):
    """
    Async Generate conversation summary
    :param conversation_id:

    Usage: generate_conversation_summary_task.delay(conversation_id)
    """
    logging.info(click.style('Start generate conversation summary: {}'.format(conversation_id), fg='green'))
    start_at = time.perf_counter()

    conversation = db.session.query(Conversation).filter(Conversation.id == conversation_id).first()
    if not conversation:
        raise NotFound('Conversation not found')

    try:
        # get conversation messages count
        history_message_count = conversation.message_count
        if history_message_count >= 5:
            app_model = conversation.app
            if not app_model:
                return

            history_messages = db.session.query(Message).filter(Message.conversation_id == conversation.id) \
                .order_by(Message.created_at.asc()).all()

            conversation.summary = LLMGenerator.generate_conversation_summary(app_model.tenant_id, history_messages)
            db.session.add(conversation)
            db.session.commit()

        end_at = time.perf_counter()
        logging.info(click.style('Conversation summary generated: {} latency: {}'.format(conversation_id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("generate conversation summary failed")
api/tasks/recover_document_indexing_task.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import datetime
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.indexing_runner import IndexingRunner, DocumentIsPausedException
from extensions.ext_database import db
from models.dataset import Document


@shared_task
def recover_document_indexing_task(dataset_id: str, document_id: str):
    """
    Async recover document
    :param dataset_id:
    :param document_id:

    Usage: recover_document_indexing_task.delay(dataset_id, document_id)
    """
    logging.info(click.style('Recover document: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(
        Document.id == document_id,
        Document.dataset_id == dataset_id
    ).first()

    if not document:
        raise NotFound('Document not found')

    try:
        indexing_runner = IndexingRunner()
        if document.indexing_status in ["waiting", "parsing", "cleaning"]:
            indexing_runner.run(document)
        elif document.indexing_status == "splitting":
            indexing_runner.run_in_splitting_status(document)
        elif document.indexing_status == "indexing":
            indexing_runner.run_in_indexing_status(document)
        end_at = time.perf_counter()
        logging.info(click.style('Processed document: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
    except DocumentIsPausedException:
        logging.info(click.style('Document paused, document id: {}'.format(document.id), fg='yellow'))
    except Exception as e:
        logging.exception("consume document failed")
        document.indexing_status = 'error'
        document.error = str(e)
        document.stopped_at = datetime.datetime.utcnow()
        db.session.commit()
api/tasks/remove_document_from_index_task.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment, Document


@shared_task
def remove_document_from_index_task(document_id: str):
    """
    Async Remove document from index
    :param document_id: document id

    Usage: remove_document_from_index.delay(document_id)
    """
    logging.info(click.style('Start remove document segments from index: {}'.format(document_id), fg='green'))
    start_at = time.perf_counter()

    document = db.session.query(Document).filter(Document.id == document_id).first()
    if not document:
        raise NotFound('Document not found')

    if document.indexing_status != 'completed':
        return

    indexing_cache_key = 'document_{}_indexing'.format(document.id)

    try:
        dataset = document.dataset

        if not dataset:
            raise Exception('Document has no dataset')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        # delete from vector index
        if dataset.indexing_technique == "high_quality":
            vector_index.del_doc(document.id)

        # delete from keyword index
        segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document.id).all()
        index_node_ids = [segment.index_node_id for segment in segments]
        if index_node_ids:
            keyword_table_index.del_nodes(index_node_ids)

        end_at = time.perf_counter()
        logging.info(
            click.style('Document removed from index: {} latency: {}'.format(document.id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("remove document from index failed")

        if not document.archived:
            document.enabled = True
            db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)
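
The except branch re-enables the document when removal fails, which suggests the caller disables it (and sets the same Redis guard key) before dispatching. A sketch under that assumption; the disable_document helper, the use of setex, and the TTL are illustrative:

    import datetime

    from extensions.ext_database import db
    from extensions.ext_redis import redis_client
    from tasks.remove_document_from_index_task import remove_document_from_index_task

    def disable_document(document):
        # mark the document disabled first so queries stop returning it,
        # then strip its nodes from the indexes asynchronously
        document.enabled = False
        document.disabled_at = datetime.datetime.utcnow()
        db.session.commit()

        indexing_cache_key = 'document_{}_indexing'.format(document.id)
        redis_client.setex(indexing_cache_key, 600, 1)  # assumed guard TTL

        remove_document_from_index_task.delay(document.id)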
api/tasks/remove_segment_from_index_task.py (new file, 58 lines)
@@ -0,0 +1,58 @@
import logging
import time

import click
from celery import shared_task
from werkzeug.exceptions import NotFound

from core.index.keyword_table_index import KeywordTableIndex
from core.index.vector_index import VectorIndex
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment


@shared_task
def remove_segment_from_index_task(segment_id: str):
    """
    Async Remove segment from index
    :param segment_id:

    Usage: remove_segment_from_index.delay(segment_id)
    """
    logging.info(click.style('Start remove segment from index: {}'.format(segment_id), fg='green'))
    start_at = time.perf_counter()

    segment = db.session.query(DocumentSegment).filter(DocumentSegment.id == segment_id).first()
    if not segment:
        raise NotFound('Segment not found')

    if segment.status != 'completed':
        return

    indexing_cache_key = 'segment_{}_indexing'.format(segment.id)

    try:
        dataset = segment.dataset

        if not dataset:
            raise Exception('Segment has no dataset')

        vector_index = VectorIndex(dataset=dataset)
        keyword_table_index = KeywordTableIndex(dataset=dataset)

        # delete from vector index
        if dataset.indexing_technique == "high_quality":
            vector_index.del_nodes([segment.index_node_id])

        # delete from keyword index
        keyword_table_index.del_nodes([segment.index_node_id])

        end_at = time.perf_counter()
        logging.info(click.style('Segment removed from index: {} latency: {}'.format(segment.id, end_at - start_at), fg='green'))
    except Exception:
        logging.exception("remove segment from index failed")
        segment.enabled = True
        db.session.commit()
    finally:
        redis_client.delete(indexing_cache_key)