Dataset schema (column : type, observed range):

- status : stringclasses (1 value)
- repo_name : stringclasses (31 values)
- repo_url : stringclasses (31 values)
- issue_id : int64 (1 to 104k)
- title : stringlengths (4 to 369)
- body : stringlengths (0 to 254k, nullable)
- issue_url : stringlengths (37 to 56)
- pull_url : stringlengths (37 to 54)
- before_fix_sha : stringlengths (40)
- after_fix_sha : stringlengths (40)
- report_datetime : unknown
- language : stringclasses (5 values)
- commit_datetime : unknown
- updated_file : stringlengths (4 to 188)
- file_content : stringlengths (0 to 5.12M)

status | repo_name | repo_url | issue_id | title | body | issue_url | pull_url | before_fix_sha | after_fix_sha | report_datetime | language | commit_datetime | updated_file | file_content
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
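Editorial note (not part of the original dump): the rows below follow the schema above, one row per (issue, updated file) pair. A minimal sketch of loading and filtering such a dataset with the Hugging Face `datasets` library follows; the dataset identifier is a placeholder assumption, not the real repository id.

```python
# Sketch only: the dataset id below is a placeholder, substitute the actual repository id.
from datasets import load_dataset

ds = load_dataset("your-org/issue-fix-pairs", split="train")  # hypothetical id
langchain_rows = ds.filter(lambda row: row["repo_name"] == "langchain-ai/langchain")
for row in langchain_rows.select(range(3)):
    print(row["issue_id"], row["title"][:60], row["updated_file"])
```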
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 11,814 | Create retriever for Outline to ask questions on knowledge base | ### Feature request
A retriever for documents from [Outline](https://github.com/outline/outline).
The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post
The implementation will be similar to the Wikipedia retriever:
https://python.langchain.com/docs/integrations/retrievers/wikipedia
### Motivation
Outline is an open-source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.
### Your contribution
PR will be coming soon. | https://github.com/langchain-ai/langchain/issues/11814 | https://github.com/langchain-ai/langchain/pull/13889 | f2af82058f4904b20ae95c6d17d2b65666bf882a | 935f78c9449c40473541666a8b0a0dc61873b0eb | "2023-10-15T01:58:24Z" | python | "2023-11-27T02:56:12Z" | libs/langchain/tests/integration_tests/utilities/test_outline.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 11,814 | Create retriever for Outline to ask questions on knowledge base | ### Feature request
A retriever for documents from [Outline](https://github.com/outline/outline).
The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post
The implementation will be similar to the Wikipedia retriever:
https://python.langchain.com/docs/integrations/retrievers/wikipedia
### Motivation
Outline is an open-source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.
### Your contribution
PR will be coming soon. | https://github.com/langchain-ai/langchain/issues/11814 | https://github.com/langchain-ai/langchain/pull/13889 | f2af82058f4904b20ae95c6d17d2b65666bf882a | 935f78c9449c40473541666a8b0a0dc61873b0eb | "2023-10-15T01:58:24Z" | python | "2023-11-27T02:56:12Z" | libs/langchain/tests/unit_tests/retrievers/test_imports.py | from langchain.retrievers import __all__
EXPECTED_ALL = [
"AmazonKendraRetriever",
"ArceeRetriever",
"ArxivRetriever",
"AzureCognitiveSearchRetriever",
"ChatGPTPluginRetriever",
"ContextualCompressionRetriever",
"ChaindeskRetriever",
"CohereRagRetriever",
"ElasticSearchBM25Retriever",
"EmbedchainRetriever",
"GoogleDocumentAIWarehouseRetriever",
"GoogleCloudEnterpriseSearchRetriever",
"GoogleVertexAIMultiTurnSearchRetriever",
"GoogleVertexAISearchRetriever",
"KayAiRetriever",
"KNNRetriever",
"LlamaIndexGraphRetriever",
"LlamaIndexRetriever",
"MergerRetriever",
"MetalRetriever",
"MilvusRetriever",
"MultiQueryRetriever",
"PineconeHybridSearchRetriever",
"PubMedRetriever",
"RemoteLangChainRetriever",
"SVMRetriever",
"SelfQueryRetriever",
"TavilySearchAPIRetriever",
"TFIDFRetriever",
"BM25Retriever",
"TimeWeightedVectorStoreRetriever",
"VespaRetriever",
"WeaviateHybridSearchRetriever",
"WikipediaRetriever",
"ZepRetriever",
"ZillizRetriever",
"DocArrayRetriever",
"RePhraseQueryRetriever",
"WebResearchRetriever",
"EnsembleRetriever",
"ParentDocumentRetriever",
"MultiVectorRetriever",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
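Editorial note between rows: issue 11,814 states the proposed Outline retriever will mirror the Wikipedia retriever. A sketch of that existing pattern follows; the `OutlineRetriever` name in the comments is a presumed name defined only by the linked PR, not confirmed by this row.

```python
# Usage pattern of the existing WikipediaRetriever, which the issue cites as the model.
# Needs the `wikipedia` package installed to actually run.
from langchain.retrievers import WikipediaRetriever

retriever = WikipediaRetriever()
docs = retriever.get_relevant_documents("knowledge base search")

# The proposed Outline integration would presumably expose the same interface (hypothetical):
# from langchain.retrievers import OutlineRetriever
# docs = OutlineRetriever().get_relevant_documents("vacation policy")
```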
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 11,814 | Create retriever for Outline to ask questions on knowledge base | ### Feature request
A retriever for documents from [Outline](https://github.com/outline/outline).
The API has a search endpoint which allows this to be possible: https://www.getoutline.com/developers#tag/Documents/paths/~1documents.search/post
The implementation will be similar to the Wikipedia retriever:
https://python.langchain.com/docs/integrations/retrievers/wikipedia
### Motivation
Outline is an open-source project that lets you create a knowledge base, like a wiki. Creating a retriever for Outline will let your team interact with your knowledge base using an LLM.
### Your contribution
PR will be coming soon. | https://github.com/langchain-ai/langchain/issues/11814 | https://github.com/langchain-ai/langchain/pull/13889 | f2af82058f4904b20ae95c6d17d2b65666bf882a | 935f78c9449c40473541666a8b0a0dc61873b0eb | "2023-10-15T01:58:24Z" | python | "2023-11-27T02:56:12Z" | libs/langchain/tests/unit_tests/utilities/test_imports.py | from langchain.utilities import __all__
EXPECTED_ALL = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GraphQLAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MetaphorSearchAPIWrapper",
"OpenWeatherMapAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"PythonREPL",
"Requests",
"RequestsWrapper",
"SQLDatabase",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"ZapierNLAWrapper",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,803 | CypherQueryCorrector cannot validate a correct cypher, some query types are not handled | CypherQueryCorrector does not handle some query types:
For example, a query with a comma separating two patterns inside a MATCH clause:
```
MATCH (a:APPLE {apple_id: 123})-[:IN]->(b:BUCKET), (ba:BANANA {name: banana1})
```
Corresponding code section:
- It extracts a relation between BUCKET and BANANA, even though there is none.
- The ELSE case should be split into sub-cases (INCOMING relation, OUTGOING relation, BIDIRECTIONAL relation, NO relation, etc.).
- If there is no relation and only a comma between the patterns, validation should not be attempted.
https://github.com/langchain-ai/langchain/blob/751226e067bc54a70910763c0eebb34544aaf47c/libs/langchain/langchain/chains/graph_qa/cypher_utils.py#L228 | https://github.com/langchain-ai/langchain/issues/13803 | https://github.com/langchain-ai/langchain/pull/13849 | e42e95cc11cc99b8597dc6665a3cfdbb0a0f37e6 | 1ad65f7a982affc9429d10a4919a78ffd54646dc | "2023-11-24T08:39:00Z" | python | "2023-11-27T03:30:11Z" | libs/langchain/langchain/chains/graph_qa/cypher_utils.py | import re
from collections import namedtuple
from typing import Any, Dict, List, Optional, Tuple
Schema = namedtuple("Schema", ["left_node", "relation", "right_node"])
class CypherQueryCorrector:
"""
Used to correct relationship direction in generated Cypher statements.
This code is copied from the winner's submission to the Cypher competition:
https://github.com/sakusaku-rich/cypher-direction-competition
"""
property_pattern = re.compile(r"\{.+?\}")
node_pattern = re.compile(r"\(.+?\)")
path_pattern = re.compile(r"\(.*\).*-.*-.*\(.*\)")
node_relation_node_pattern = re.compile(
r"(\()+(?P<left_node>[^()]*?)\)(?P<relation>.*?)\((?P<right_node>[^()]*?)(\))+"
)
relation_type_pattern = re.compile(r":(?P<relation_type>.+?)?(\{.+\})?]")
def __init__(self, schemas: List[Schema]):
"""
Args:
schemas: list of schemas
"""
self.schemas = schemas
def clean_node(self, node: str) -> str:
"""
Args:
node: node in string format
"""
node = re.sub(self.property_pattern, "", node)
node = node.replace("(", "")
node = node.replace(")", "")
node = node.strip()
return node
def detect_node_variables(self, query: str) -> Dict[str, List[str]]:
"""
Args:
query: cypher query
"""
nodes = re.findall(self.node_pattern, query)
nodes = [self.clean_node(node) for node in nodes]
res: Dict[str, Any] = {}
for node in nodes:
parts = node.split(":")
if parts == "":
continue
variable = parts[0]
if variable not in res:
res[variable] = []
res[variable] += parts[1:]
return res
def extract_paths(self, query: str) -> "List[str]":
"""
Args:
query: cypher query
"""
return re.findall(self.path_pattern, query)
def judge_direction(self, relation: str) -> str:
"""
Args:
relation: relation in string format
"""
direction = "BIDIRECTIONAL"
if relation[0] == "<":
direction = "INCOMING"
if relation[-1] == ">":
direction = "OUTGOING"
return direction
def extract_node_variable(self, part: str) -> Optional[str]:
"""
Args:
part: node in string format
"""
part = part.lstrip("(").rstrip(")")
idx = part.find(":")
if idx != -1:
part = part[:idx]
return None if part == "" else part
def detect_labels(
self, str_node: str, node_variable_dict: Dict[str, Any]
) -> List[str]:
"""
Args:
str_node: node in string format
node_variable_dict: dictionary of node variables
"""
splitted_node = str_node.split(":")
variable = splitted_node[0]
labels = []
if variable in node_variable_dict:
labels = node_variable_dict[variable]
elif variable == "" and len(splitted_node) > 1:
labels = splitted_node[1:]
return labels
def verify_schema(
self,
from_node_labels: List[str],
relation_types: List[str],
to_node_labels: List[str],
) -> bool:
"""
Args:
from_node_labels: labels of the from node
relation_type: type of the relation
to_node_labels: labels of the to node
"""
valid_schemas = self.schemas
if from_node_labels != []:
from_node_labels = [label.strip("`") for label in from_node_labels]
valid_schemas = [
schema for schema in valid_schemas if schema[0] in from_node_labels
]
if to_node_labels != []:
to_node_labels = [label.strip("`") for label in to_node_labels]
valid_schemas = [
schema for schema in valid_schemas if schema[2] in to_node_labels
]
if relation_types != []:
relation_types = [type.strip("`") for type in relation_types]
valid_schemas = [
schema for schema in valid_schemas if schema[1] in relation_types
]
return valid_schemas != []
def detect_relation_types(self, str_relation: str) -> Tuple[str, List[str]]:
"""
Args:
str_relation: relation in string format
"""
relation_direction = self.judge_direction(str_relation)
relation_type = self.relation_type_pattern.search(str_relation)
if relation_type is None or relation_type.group("relation_type") is None:
return relation_direction, []
relation_types = [
t.strip().strip("!")
for t in relation_type.group("relation_type").split("|")
]
return relation_direction, relation_types
def correct_query(self, query: str) -> str:
"""
Args:
query: cypher query
"""
node_variable_dict = self.detect_node_variables(query)
paths = self.extract_paths(query)
for path in paths:
original_path = path
start_idx = 0
while start_idx < len(path):
match_res = re.match(self.node_relation_node_pattern, path[start_idx:])
if match_res is None:
break
start_idx += match_res.start()
match_dict = match_res.groupdict()
left_node_labels = self.detect_labels(
match_dict["left_node"], node_variable_dict
)
right_node_labels = self.detect_labels(
match_dict["right_node"], node_variable_dict
)
end_idx = (
start_idx
+ 4
+ len(match_dict["left_node"])
+ len(match_dict["relation"])
+ len(match_dict["right_node"])
)
original_partial_path = original_path[start_idx : end_idx + 1]
relation_direction, relation_types = self.detect_relation_types(
match_dict["relation"]
)
if relation_types != [] and "".join(relation_types).find("*") != -1:
start_idx += (
len(match_dict["left_node"]) + len(match_dict["relation"]) + 2
)
continue
if relation_direction == "OUTGOING":
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
if not is_legal:
is_legal = self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if is_legal:
corrected_relation = "<" + match_dict["relation"][:-1]
corrected_partial_path = original_partial_path.replace(
match_dict["relation"], corrected_relation
)
query = query.replace(
original_partial_path, corrected_partial_path
)
else:
return ""
elif relation_direction == "INCOMING":
is_legal = self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if not is_legal:
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
if is_legal:
corrected_relation = match_dict["relation"][1:] + ">"
corrected_partial_path = original_partial_path.replace(
match_dict["relation"], corrected_relation
)
query = query.replace(
original_partial_path, corrected_partial_path
)
else:
return ""
else:
is_legal = self.verify_schema(
left_node_labels, relation_types, right_node_labels
)
is_legal |= self.verify_schema(
right_node_labels, relation_types, left_node_labels
)
if not is_legal:
return ""
start_idx += (
len(match_dict["left_node"]) + len(match_dict["relation"]) + 2
)
return query
def __call__(self, query: str) -> str:
"""Correct the query to make it valid. If
Args:
query: cypher query
"""
return self.correct_query(query)
|
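Editorial illustration (not part of the dataset row above): a minimal sketch reproducing the failure described in issue 13,803 against the pre-fix module shown above. The schema triple is an assumption chosen so that the first pattern is valid; with this pre-fix code the comma between the two patterns is parsed as a relationship between BUCKET and BANANA, no matching schema is found, and the otherwise valid query is expected to be rejected (an empty string returned).

```python
# Repro sketch for issue 13803, assuming the pre-fix cypher_utils shown above is importable.
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema

corrector = CypherQueryCorrector([Schema("APPLE", "IN", "BUCKET")])
query = 'MATCH (a:APPLE {apple_id: 123})-[:IN]->(b:BUCKET), (ba:BANANA {name: "banana1"}) RETURN a, ba'
# The comma between the two patterns is treated as a relation between BUCKET and BANANA,
# no schema matches it, and the corrector rejects the query.
print(repr(corrector(query)))  # expected with this pre-fix version: '' (valid query wrongly rejected)
```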
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,803 | CypherQueryCorrector cannot validate a correct cypher, some query types are not handled | CypherQueryCorrector does not handle some query types:
For example, a query with a comma separating two patterns inside a MATCH clause:
```
MATCH (a:APPLE {apple_id: 123})-[:IN]->(b:BUCKET), (ba:BANANA {name: banana1})
```
Corresponding code section:
- It extracts a relation between BUCKET and BANANA, even though there is none.
- The ELSE case should be split into sub-cases (INCOMING relation, OUTGOING relation, BIDIRECTIONAL relation, NO relation, etc.).
- If there is no relation and only a comma between the patterns, validation should not be attempted.
https://github.com/langchain-ai/langchain/blob/751226e067bc54a70910763c0eebb34544aaf47c/libs/langchain/langchain/chains/graph_qa/cypher_utils.py#L228 | https://github.com/langchain-ai/langchain/issues/13803 | https://github.com/langchain-ai/langchain/pull/13849 | e42e95cc11cc99b8597dc6665a3cfdbb0a0f37e6 | 1ad65f7a982affc9429d10a4919a78ffd54646dc | "2023-11-24T08:39:00Z" | python | "2023-11-27T03:30:11Z" | libs/langchain/tests/unit_tests/data/cypher_corrector.csv | "statement","schema","correct_query"
"MATCH (p:Person)-[:KNOWS]->(:Person) RETURN p, count(*) AS count","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)-[:KNOWS]->(:Person) RETURN p, count(*) AS count"
"MATCH (p:Person)<-[:KNOWS]-(:Person) RETURN p, count(*) AS count","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)<-[:KNOWS]-(:Person) RETURN p, count(*) AS count"
"MATCH (p:Person {id:""Foo""})<-[:WORKS_AT]-(o:Organization) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person {id:""Foo""})-[:WORKS_AT]->(o:Organization) RETURN o.name AS name"
"MATCH (o:Organization)-[:WORKS_AT]->(p:Person {id:""Foo""}) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:Organization)<-[:WORKS_AT]-(p:Person {id:""Foo""}) RETURN o.name AS name"
"MATCH (o:Organization {name:""Bar""})-[:WORKS_AT]->(p:Person {id:""Foo""}) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:Organization {name:""Bar""})<-[:WORKS_AT]-(p:Person {id:""Foo""}) RETURN o.name AS name"
"MATCH (o:Organization)-[:WORKS_AT]->(p:Person {id:""Foo""})-[:WORKS_AT]->(o1:Organization) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:Organization)<-[:WORKS_AT]-(p:Person {id:""Foo""})-[:WORKS_AT]->(o1:Organization) RETURN o.name AS name"
"MATCH (o:`Organization` {name:""Foo""})-[:WORKS_AT]->(p:Person {id:""Foo""})-[:WORKS_AT]-(o1:Organization {name:""b""})
WHERE id(o) > id(o1)
RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:`Organization` {name:""Foo""})<-[:WORKS_AT]-(p:Person {id:""Foo""})-[:WORKS_AT]-(o1:Organization {name:""b""})
WHERE id(o) > id(o1)
RETURN o.name AS name"
"MATCH (p:Person)
RETURN p,
[(p)-[:WORKS_AT]->(o:Organization) | o.name] AS op","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)
RETURN p,
[(p)-[:WORKS_AT]->(o:Organization) | o.name] AS op"
"MATCH (p:Person)
RETURN p,
[(p)<-[:WORKS_AT]-(o:Organization) | o.name] AS op","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)
RETURN p,
[(p)-[:WORKS_AT]->(o:Organization) | o.name] AS op"
"MATCH (p:Person {name:""John""}) MATCH (p)-[:WORKS_AT]->(:Organization) RETURN p, count(*)","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person {name:""John""}) MATCH (p)-[:WORKS_AT]->(:Organization) RETURN p, count(*)"
"MATCH (p:Person) MATCH (p)<-[:WORKS_AT]-(:Organization) RETURN p, count(*)","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person) MATCH (p)-[:WORKS_AT]->(:Organization) RETURN p, count(*)"
"MATCH (p:Person), (p)<-[:WORKS_AT]-(:Organization) RETURN p, count(*)","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person), (p)-[:WORKS_AT]->(:Organization) RETURN p, count(*)"
"MATCH (o:Organization)-[:WORKS_AT]->(p:Person {id:""Foo""})-[:WORKS_AT]->(o1:Organization)
WHERE id(o) < id(o1) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:Organization)<-[:WORKS_AT]-(p:Person {id:""Foo""})-[:WORKS_AT]->(o1:Organization)
WHERE id(o) < id(o1) RETURN o.name AS name"
"MATCH (o:Organization)-[:WORKS_AT]-(p:Person {id:""Foo""})-[:WORKS_AT]-(o1:Organization)
WHERE id(o) < id(o1) RETURN o.name AS name","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (o:Organization)-[:WORKS_AT]-(p:Person {id:""Foo""})-[:WORKS_AT]-(o1:Organization)
WHERE id(o) < id(o1) RETURN o.name AS name"
"MATCH (p:Person)--(:Organization)--(p1:Person)
RETURN p1","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)--(:Organization)--(p1:Person)
RETURN p1"
"MATCH (p:Person)<--(:Organization)--(p1:Person)
RETURN p1","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)-->(:Organization)--(p1:Person)
RETURN p1"
"MATCH (p:Person)<-[r]-(:Organization)--(p1:Person)
RETURN p1, r","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)-[r]->(:Organization)--(p1:Person)
RETURN p1, r"
"MATCH (person:Person)
CALL {
WITH person
MATCH (person)-->(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (person:Person)
CALL {
WITH person
MATCH (person)-->(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o"
"MATCH (person:Person)
CALL {
WITH person
MATCH (person)<--(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (person:Person)
CALL {
WITH person
MATCH (person)-->(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o"
"MATCH (person:Person)
CALL {
WITH person
MATCH (person)-[:KNOWS]->(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)",
"MATCH (person:Person)
CALL {
WITH person
MATCH (person)<-[:WORKS_AT|INVESTOR]-(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o","(Person, KNOWS, Person), (Person, WORKS_AT, Organization), (Person, INVESTOR, Organization)","MATCH (person:Person)
CALL {
WITH person
MATCH (person)-[:WORKS_AT|INVESTOR]->(o:Organization)
RETURN o LIMIT 3
}
RETURN person, o"
"MATCH (p:Person)
WHERE EXISTS { (p)<-[:KNOWS]-()}
RETURN p","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)
WHERE EXISTS { (p)<-[:KNOWS]-()}
RETURN p"
"MATCH (p:Person)
WHERE EXISTS { (p)-[:KNOWS]->()}
RETURN p","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)
WHERE EXISTS { (p)-[:KNOWS]->()}
RETURN p"
"MATCH (p:Person)
WHERE EXISTS { (p)<-[:WORKS_AT]-()}
RETURN p","(Person, KNOWS, Person), (Person, WORKS_AT, Organization)","MATCH (p:Person)
WHERE EXISTS { (p)-[:WORKS_AT]->()}
RETURN p"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title"
"MATCH (p:Person)-[:ACTED_IN]-(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]-(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title"
"MATCH (p:Person)<-[:ACTED_IN]-(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND m.year = 2013
RETURN m.title"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
RETURN p.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
RETURN p.name"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
AND m.year > 2019
AND m.year < 2030
RETURN p.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
AND m.year > 2019
AND m.year < 2030
RETURN p.name"
"MATCH (p:Person)<-[:ACTED_IN]-(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
AND m.year > 2019
AND m.year < 2030
RETURN p.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
AND m.year > 2019
AND m.year < 2030
RETURN p.name"
"MATCH (p:Person)<-[:FOLLOWS]-(m:Movie)
WHERE p.name <> 'Tom Hanks'
AND m.title = 'Captain Phillips'
AND m.year > 2019
AND m.year < 2030
RETURN p.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)",
"MATCH (p:Person)-[:`ACTED_IN`]->(m:Movie)<-[:DIRECTED]-(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:`ACTED_IN`]->(m:Movie)<-[:DIRECTED]-(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title"
"MATCH (p:Person)-[:ACTED_IN]-(m:Movie)<-[:DIRECTED]-(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]-(m:Movie)<-[:DIRECTED]-(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title"
"MATCH (p:Person)-[:ACTED_IN]-(m:Movie)-[:DIRECTED]->(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]-(m:Movie)<-[:DIRECTED]-(p)
WHERE p.born.year > 1960
RETURN p.name, p.born, labels(p), m.title"
"MATCH (p:`Person`)<-[r]-(m:Movie)
WHERE p.name = 'Tom Hanks'
RETURN m.title AS movie, type(r) AS relationshipType","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:`Person`)-[r]->(m:Movie)
WHERE p.name = 'Tom Hanks'
RETURN m.title AS movie, type(r) AS relationshipType"
"MATCH (d:Person)-[:DIRECTED]->(m:Movie)-[:IN_GENRE]->(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (d:Person)-[:DIRECTED]->(m:Movie)-[:IN_GENRE]->(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name"
"MATCH (d:Person)-[:DIRECTED]->(m:Movie)<--(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (d:Person)-[:DIRECTED]->(m:Movie)-->(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name"
"MATCH (d:Person)<--(m:Movie)<--(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (d:Person)-->(m:Movie)-->(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name"
"MATCH (d:Person)-[:DIRECTED]-(m:Movie)<-[:IN_GENRE]-(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (d:Person)-[:DIRECTED]-(m:Movie)-[:IN_GENRE]->(g:Genre)
WHERE m.year = 2000 AND g.name = ""Horror""
RETURN d.name"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND exists {(p)-[:DIRECTED]->(m)}
RETURN p.name, labels(p), m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND exists {(p)-[:DIRECTED]->(m)}
RETURN p.name, labels(p), m.title"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND exists {(p)<-[:DIRECTED]-(m)}
RETURN p.name, labels(p), m.title","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie)
WHERE p.name = 'Tom Hanks'
AND exists {(p)-[:DIRECTED]->(m)}
RETURN p.name, labels(p), m.title"
"MATCH (a:Person)-[:ACTED_IN]->(m:Movie)
WHERE m.year > 2000
MATCH (m)<-[:DIRECTED]-(d:Person)
RETURN a.name, m.title, d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (a:Person)-[:ACTED_IN]->(m:Movie)
WHERE m.year > 2000
MATCH (m)<-[:DIRECTED]-(d:Person)
RETURN a.name, m.title, d.name"
"MATCH (a:Person)-[:ACTED_IN]-(m:Movie)
WHERE m.year > 2000
MATCH (m)-[:DIRECTED]->(d:Person)
RETURN a.name, m.title, d.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (a:Person)-[:ACTED_IN]-(m:Movie)
WHERE m.year > 2000
MATCH (m)<-[:DIRECTED]-(d:Person)
RETURN a.name, m.title, d.name"
"MATCH (m:Movie) WHERE m.title = ""Kiss Me Deadly""
MATCH (m)-[:IN_GENRE]-(g:Genre)-[:IN_GENRE]->(rec:Movie)
MATCH (m)-[:ACTED_IN]->(a:Person)-[:ACTED_IN]-(rec)
RETURN rec.title, a.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (m:Movie) WHERE m.title = ""Kiss Me Deadly""
MATCH (m)-[:IN_GENRE]-(g:Genre)<-[:IN_GENRE]-(rec:Movie)
MATCH (m)<-[:ACTED_IN]-(a:Person)-[:ACTED_IN]-(rec)
RETURN rec.title, a.name"
"MATCH (p:Person)-[:ACTED_IN]->(m:Movie),
(coActors:Person)-[:ACTED_IN]->(m)
WHERE p.name = 'Eminem'
RETURN m.title AS movie ,collect(coActors.name) AS coActors","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie),
(coActors:Person)-[:ACTED_IN]->(m)
WHERE p.name = 'Eminem'
RETURN m.title AS movie ,collect(coActors.name) AS coActors"
"MATCH (p:Person)<-[:ACTED_IN]-(m:Movie),
(coActors:Person)-[:ACTED_IN]->(m)
WHERE p.name = 'Eminem'
RETURN m.title AS movie ,collect(coActors.name) AS coActors","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN]->(m:Movie),
(coActors:Person)-[:ACTED_IN]->(m)
WHERE p.name = 'Eminem'
RETURN m.title AS movie ,collect(coActors.name) AS coActors"
"MATCH p = ((person:Person)<-[]-(movie:Movie))
WHERE person.name = 'Walt Disney'
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = ((person:Person)-[]->(movie:Movie))
WHERE person.name = 'Walt Disney'
RETURN p"
"MATCH p = ((person:Person)<-[:DIRECTED]-(movie:Movie))
WHERE person.name = 'Walt Disney'
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = ((person:Person)-[:DIRECTED]->(movie:Movie))
WHERE person.name = 'Walt Disney'
RETURN p"
"MATCH p = shortestPath((p1:Person)-[*]-(p2:Person))
WHERE p1.name = ""Eminem""
AND p2.name = ""Charlton Heston""
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = shortestPath((p1:Person)-[*]-(p2:Person))
WHERE p1.name = ""Eminem""
AND p2.name = ""Charlton Heston""
RETURN p"
"MATCH p = ((person:Person)-[:DIRECTED*]->(:Person))
WHERE person.name = 'Walt Disney'
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = ((person:Person)-[:DIRECTED*]->(:Person))
WHERE person.name = 'Walt Disney'
RETURN p"
"MATCH p = ((person:Person)-[:DIRECTED*1..4]->(:Person))
WHERE person.name = 'Walt Disney'
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = ((person:Person)-[:DIRECTED*1..4]->(:Person))
WHERE person.name = 'Walt Disney'
RETURN p"
"MATCH (p:Person {name: 'Eminem'})-[:ACTED_IN*2]-(others:Person)
RETURN others.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person {name: 'Eminem'})-[:ACTED_IN*2]-(others:Person)
RETURN others.name"
"MATCH (u:User {name: ""Misty Williams""})-[r:RATED]->(:Movie)
WITH u, avg(r.rating) AS average
MATCH (u)-[r:RATED]->(m:Movie)
WHERE r.rating > average
RETURN average , m.title AS movie,
r.rating as rating
ORDER BY rating DESC","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (User, RATED, Movie)","MATCH (u:User {name: ""Misty Williams""})-[r:RATED]->(:Movie)
WITH u, avg(r.rating) AS average
MATCH (u)-[r:RATED]->(m:Movie)
WHERE r.rating > average
RETURN average , m.title AS movie,
r.rating as rating
ORDER BY rating DESC"
"MATCH (u:User {name: ""Misty Williams""})-[r:RATED]->(:Movie)
WITH u, avg(r.rating) AS average
MATCH (u)<-[r:RATED]-(m:Movie)
WHERE r.rating > average
RETURN average , m.title AS movie,
r.rating as rating
ORDER BY rating DESC","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (User, RATED, Movie)","MATCH (u:User {name: ""Misty Williams""})-[r:RATED]->(:Movie)
WITH u, avg(r.rating) AS average
MATCH (u)-[r:RATED]->(m:Movie)
WHERE r.rating > average
RETURN average , m.title AS movie,
r.rating as rating
ORDER BY rating DESC"
"MATCH (p:`Person`)
WHERE p.born.year = 1980
WITH p LIMIT 3
MATCH (p)<-[:ACTED_IN]-(m:Movie)
WITH p, collect(m.title) AS movies
RETURN p.name AS actor, movies","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:`Person`)
WHERE p.born.year = 1980
WITH p LIMIT 3
MATCH (p)-[:ACTED_IN]->(m:Movie)
WITH p, collect(m.title) AS movies
RETURN p.name AS actor, movies"
"MATCH (p:Person)
WHERE p.born.year = 1980
WITH p LIMIT 3
MATCH (p)-[:ACTED_IN]->(m:Movie)<-[:IN_GENRE]-(g)
WITH p, collect(DISTINCT g.name) AS genres
RETURN p.name AS actor, genres","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)
WHERE p.born.year = 1980
WITH p LIMIT 3
MATCH (p)-[:ACTED_IN]->(m:Movie)-[:IN_GENRE]->(g)
WITH p, collect(DISTINCT g.name) AS genres
RETURN p.name AS actor, genres"
"CALL {
MATCH (m:Movie) WHERE m.year = 2000
RETURN m ORDER BY m.imdbRating DESC LIMIT 10
}
MATCH (:User)-[r:RATED]->(m)
RETURN m.title, avg(r.rating)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (User, RATED, Movie)","CALL {
MATCH (m:Movie) WHERE m.year = 2000
RETURN m ORDER BY m.imdbRating DESC LIMIT 10
}
MATCH (:User)-[r:RATED]->(m)
RETURN m.title, avg(r.rating)"
"CALL {
MATCH (m:Movie) WHERE m.year = 2000
RETURN m ORDER BY m.imdbRating DESC LIMIT 10
}
MATCH (:User)<-[r:RATED]-(m)
RETURN m.title, avg(r.rating)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (User, RATED, Movie)","CALL {
MATCH (m:Movie) WHERE m.year = 2000
RETURN m ORDER BY m.imdbRating DESC LIMIT 10
}
MATCH (:User)-[r:RATED]->(m)
RETURN m.title, avg(r.rating)"
"MATCH (m:Movie)
CALL {
WITH m
MATCH (m)-[r:RATED]->(u)
WHERE r.rating = 5
RETURN count(u) AS numReviews
}
RETURN m.title, numReviews
ORDER BY numReviews DESC","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (m:Movie)
CALL {
WITH m
MATCH (m)<-[r:RATED]-(u)
WHERE r.rating = 5
RETURN count(u) AS numReviews
}
RETURN m.title, numReviews
ORDER BY numReviews DESC"
"MATCH (p:Person)
WITH p LIMIT 100
CALL {
WITH p
OPTIONAL MATCH (p)<-[:ACTED_IN]-(m)
RETURN m.title + "": "" + ""Actor"" AS work
UNION
WITH p
OPTIONAL MATCH (p)-[:DIRECTED]->(m:Movie)
RETURN m.title+ "": "" + ""Director"" AS work
}
RETURN p.name, collect(work)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)
WITH p LIMIT 100
CALL {
WITH p
OPTIONAL MATCH (p)-[:ACTED_IN]->(m)
RETURN m.title + "": "" + ""Actor"" AS work
UNION
WITH p
OPTIONAL MATCH (p)-[:DIRECTED]->(m:Movie)
RETURN m.title+ "": "" + ""Director"" AS work
}
RETURN p.name, collect(work)"
"MATCH (p:Person)<-[:ACTED_IN {role:""Neo""}]-(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN {role:""Neo""}]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (p:Person)<-[:ACTED_IN {role:""Neo""}]-(m)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN {role:""Neo""}]->(m)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p"
"MATCH (p:Person)-[:ACTED_IN {role:""Neo""}]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:ACTED_IN {role:""Neo""}]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (wallstreet:Movie {title: 'Wall Street'})-[:ACTED_IN {role:""Foo""}]->(actor)
RETURN actor.name","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (wallstreet:Movie {title: 'Wall Street'})<-[:ACTED_IN {role:""Foo""}]-(actor)
RETURN actor.name"
"MATCH (p:Person)<-[:`ACTED_IN` {role:""Neo""}]-(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:`ACTED_IN` {role:""Neo""}]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (p:`Person`)<-[:`ACTED_IN` {role:""Neo""}]-(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:`Person`)-[:`ACTED_IN` {role:""Neo""}]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (p:`Person`)<-[:`ACTED_IN` {role:""Neo""}]-(m)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:`Person`)-[:`ACTED_IN` {role:""Neo""}]->(m)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (p:Person)<-[:!DIRECTED]-(:Movie) RETURN p, count(*)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:!DIRECTED]->(:Movie) RETURN p, count(*)"
"MATCH (p:Person)<-[:`ACTED_IN`|`DIRECTED`]-(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH (p:Person)-[:`ACTED_IN`|`DIRECTED`]->(m:Movie)
WHERE p.name = $actorName
AND m.title = $movieName
RETURN p, m"
"MATCH (a:Person:Actor)-[:ACTED_IN]->(:Movie)
RETURN a, count(*)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (Actor, ACTED_IN, Movie)","MATCH (a:Person:Actor)-[:ACTED_IN]->(:Movie)
RETURN a, count(*)"
"MATCH (a:Person:Actor)<-[:ACTED_IN]-(:Movie)
RETURN a, count(*)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (Actor, ACTED_IN, Movie)","MATCH (a:Person:Actor)-[:ACTED_IN]->(:Movie)
RETURN a, count(*)"
"MATCH (a:Person:Actor)<-[:ACTED_IN]-()
RETURN a, count(*)","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (Actor, ACTED_IN, Movie)","MATCH (a:Person:Actor)-[:ACTED_IN]->()
RETURN a, count(*)"
"MATCH (a:Person:Actor)
RETURN a, [(a)<-[:`ACTED_IN`]-(m) | m.title] AS movies","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (Actor, ACTED_IN, Movie)","MATCH (a:Person:Actor)
RETURN a, [(a)-[:`ACTED_IN`]->(m) | m.title] AS movies"
"MATCH (a:Person:Actor)
RETURN a, [(a)-[:`ACTED_IN`]->(m) | m.title] AS movies","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie), (Actor, ACTED_IN, Movie)","MATCH (a:Person:Actor)
RETURN a, [(a)-[:`ACTED_IN`]->(m) | m.title] AS movies"
"MATCH p = ((person:Person)-[:DIRECTED*]->(:Movie)) RETURN p
","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","MATCH p = ((person:Person)-[:DIRECTED*]->(:Movie)) RETURN p
"
"""MATCH p = ((person:Person)-[:DIRECTED*1..3]->(:Movie)) RETURN p""","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","""MATCH p = ((person:Person)-[:DIRECTED*1..3]->(:Movie)) RETURN p"""
"""MATCH p = ((person:Person)-[:DIRECTED*..3]->(:Movie)) RETURN p""","(Person, FOLLOWS, Person), (Person, ACTED_IN, Movie), (Person, REVIEWED, Movie), (Person, WROTE, Movie), (Person, DIRECTED, Movie), (Movie, IN_GENRE, Genre), (Person, RATED, Movie)","""MATCH p = ((person:Person)-[:DIRECTED*..3]->(:Movie)) RETURN p"""
"MATCH (p:Person {name:""Emil Eifrem""})-[:HAS_CEO]-(o:Organization)<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
RETURN o.name AS company, a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3
","(Person, HAS_CEO, Organization), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH (p:Person {name:""Emil Eifrem""})-[:HAS_CEO]-(o:Organization)<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
RETURN o.name AS company, a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3
"
"MATCH (p:Person {name:""Emil Eifrem""})-[:HAS_CEO]->(o:Organization)<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
RETURN o.name AS company, a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3
","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH (p:Person {name:""Emil Eifrem""})<-[:HAS_CEO]-(o:Organization)<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
RETURN o.name AS company, a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3
"
"MATCH (o:Organization {name: ""Databricks""})-[:HAS_COMPETITOR]->(c:Organization)
RETURN c.name as Competitor","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH (o:Organization {name: ""Databricks""})-[:HAS_COMPETITOR]->(c:Organization)
RETURN c.name as Competitor"
"MATCH (o:Organization {name: ""Databricks""})<-[:HAS_COMPETITOR]-(c:Organization)
RETURN c.name as Competitor","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH (o:Organization {name: ""Databricks""})<-[:HAS_COMPETITOR]-(c:Organization)
RETURN c.name as Competitor"
"MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]->(t)
WHERE NOT EXISTS {(t)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]->(t)
WHERE NOT EXISTS {(t)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max"
"MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]-(t)
WHERE NOT EXISTS {(t)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]-(t)
WHERE NOT EXISTS {(t)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max"
"MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]-(t:Person)
WHERE NOT EXISTS {(o)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH p=(o:Organization {name:""Blackstone""})-[:HAS_SUBSIDIARY*]-(t:Person)
WHERE NOT EXISTS {(o)-[:HAS_SUBSIDIARY]->()}
RETURN max(length(p)) AS max"
"CALL apoc.ml.openai.embedding([""Are there any news regarding employee satisfaction?""], $openai_api_key) YIELD embedding
CALL db.index.vector.queryNodes(""news"", 3, embedding) YIELD node,score
RETURN node.text AS text, score","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","CALL apoc.ml.openai.embedding([""Are there any news regarding employee satisfaction?""], $openai_api_key) YIELD embedding
CALL db.index.vector.queryNodes(""news"", 3, embedding) YIELD node,score
RETURN node.text AS text, score"
"MATCH (o:Organization {name:""Neo4j""})<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
WHERE toLower(c.text) CONTAINS 'partnership'
RETURN a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3","(Organization, HAS_CEO, Person), (Article, MENTIONS, Organization), (Article, HAS_CHUNK, Chunk), (Organization, HAS_COMPETITOR, Organization), (Organization, HAS_SUBSIDIARY, Organization)","MATCH (o:Organization {name:""Neo4j""})<-[:MENTIONS]-(a:Article)-[:HAS_CHUNK]->(c)
WHERE toLower(c.text) CONTAINS 'partnership'
RETURN a.title AS title, c.text AS text, a.date AS date
ORDER BY date DESC LIMIT 3"
"MATCH (n:`Some Label`)-[:`SOME REL TYPE üäß`]->(m:`Sömé Øther Læbel`) RETURN n,m","(Some Label, SOME REL TYPE üäß, Sömé Øther Læbel)","MATCH (n:`Some Label`)-[:`SOME REL TYPE üäß`]->(m:`Sömé Øther Læbel`) RETURN n,m"
"MATCH (n:`Some Label`)<-[:`SOME REL TYPE üäß`]-(m:`Sömé Øther Læbel`) RETURN n,m","(Some Label, SOME REL TYPE üäß, Sömé Øther Læbel)","MATCH (n:`Some Label`)-[:`SOME REL TYPE üäß`]->(m:`Sömé Øther Læbel`) RETURN n,m"
|
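Editorial note between rows: the CSV above (columns statement, schema, correct_query) is the fixture the corrector's unit test presumably consumes. A hedged sketch of a parametrized test over it follows; the fixture path and the parsing of the schema column into triples are assumptions inferred from the header row, and the actual test in the repository may differ.

```python
# Sketch only: drives CypherQueryCorrector with rows of cypher_corrector.csv.
import csv
import re

import pytest
from langchain.chains.graph_qa.cypher_utils import CypherQueryCorrector, Schema

CSV_PATH = "libs/langchain/tests/unit_tests/data/cypher_corrector.csv"  # assumed location

def load_cases(path: str = CSV_PATH) -> list:
    with open(path, newline="") as f:
        return list(csv.DictReader(f))

def parse_schemas(raw: str) -> list:
    # "(Person, KNOWS, Person), (Person, WORKS_AT, Organization)" -> [Schema(...), ...]
    return [Schema(*(part.strip() for part in group.split(",")))
            for group in re.findall(r"\(([^)]+)\)", raw)]

@pytest.mark.parametrize("case", load_cases())
def test_cypher_corrector(case: dict) -> None:
    corrector = CypherQueryCorrector(parse_schemas(case["schema"]))
    # An empty correct_query column means the corrector is expected to reject the statement.
    assert corrector(case["statement"]) == case["correct_query"]
```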
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,667 | Can not load chain with ChatPromptTemplate | ### System Info
LangChain version: `0.0.339`
Python version: `3.10`
### Who can help?
@hwchase17
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
When serializing the `ChatPromptTemplate`, it saves it in a JSON/YAML format like this:
```
{'input_variables': ['question', 'context'],
'output_parser': None,
'partial_variables': {},
'messages': [{'prompt': {'input_variables': ['context', 'question'],
'output_parser': None,
'partial_variables': {},
'template': "...",
'template_format': 'f-string',
'validate_template': True,
'_type': 'prompt'},
'additional_kwargs': {}}],
'_type': 'chat'}
```
Note that the `_type` is "chat".
However, LangChain's `load_prompt_from_config` [does not recognize "chat" as a supported prompt type](https://github.com/langchain-ai/langchain/blob/master/libs/core/langchain_core/prompts/loading.py#L19).
Here is a minimal example to reproduce the issue:
```python
from langchain.prompts import ChatPromptTemplate
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.loading import load_chain
TEMPLATE = """Answer the question based on the context:
{context}
Question: {question}
Answer:
"""
chat_prompt = ChatPromptTemplate.from_template(TEMPLATE)
llm = OpenAI()
def get_retriever(persist_dir = None):
vectorstore = FAISS.from_texts(
["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
return vectorstore.as_retriever()
chain_with_chat_prompt = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=get_retriever(),
chain_type_kwargs={"prompt": chat_prompt},
)
chain_with_prompt_saved_path = "./chain_with_prompt.yaml"
chain_with_chat_prompt.save(chain_with_prompt_saved_path)
loaded_chain = load_chain(chain_with_prompt_saved_path, retriever=get_retriever())
```
The above script failed with the error:
`ValueError: Loading chat prompt not supported`
### Expected behavior
Load a chain that contains `ChatPromptTemplate` should work. | https://github.com/langchain-ai/langchain/issues/13667 | https://github.com/langchain-ai/langchain/pull/13818 | 8a3e0c9afa59487b191e411a4acae55e39db7fa9 | b3e08f9239d5587fa61adfa9c58dafdf6b740a38 | "2023-11-21T17:40:19Z" | python | "2023-11-27T16:39:50Z" | libs/core/langchain_core/prompts/loading.py | """Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils import try_load_from_hub
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/prompts/"
logger = logging.getLogger(__name__)
def load_prompt_from_config(config: dict) -> BasePromptTemplate:
"""Load prompt from Config Dict."""
if "_type" not in config:
logger.warning("No `_type` key found, defaulting to `prompt`.")
config_type = config.pop("_type", "prompt")
if config_type not in type_to_loader_dict:
raise ValueError(f"Loading {config_type} prompt not supported")
prompt_loader = type_to_loader_dict[config_type]
return prompt_loader(config)
def _load_template(var_name: str, config: dict) -> dict:
"""Load template from the path if applicable."""
# Check if template_path exists in config.
if f"{var_name}_path" in config:
# If it does, make sure template variable doesn't also exist.
if var_name in config:
raise ValueError(
f"Both `{var_name}_path` and `{var_name}` cannot be provided."
)
# Pop the template path from the config.
template_path = Path(config.pop(f"{var_name}_path"))
# Load the template.
if template_path.suffix == ".txt":
with open(template_path) as f:
template = f.read()
else:
raise ValueError
# Set the template variable to the extracted variable.
config[var_name] = template
return config
def _load_examples(config: dict) -> dict:
"""Load examples if necessary."""
if isinstance(config["examples"], list):
pass
elif isinstance(config["examples"], str):
with open(config["examples"]) as f:
if config["examples"].endswith(".json"):
examples = json.load(f)
elif config["examples"].endswith((".yaml", ".yml")):
examples = yaml.safe_load(f)
else:
raise ValueError(
"Invalid file format. Only json or yaml formats are supported."
)
config["examples"] = examples
else:
raise ValueError("Invalid examples format. Only list or string are supported.")
return config
def _load_output_parser(config: dict) -> dict:
"""Load output parser."""
if "output_parser" in config and config["output_parser"]:
_config = config.pop("output_parser")
output_parser_type = _config.pop("_type")
if output_parser_type == "default":
output_parser = StrOutputParser(**_config)
else:
raise ValueError(f"Unsupported output parser {output_parser_type}")
config["output_parser"] = output_parser
return config
def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:
"""Load the "few shot" prompt from the config."""
# Load the suffix and prefix templates.
config = _load_template("suffix", config)
config = _load_template("prefix", config)
# Load the example prompt.
if "example_prompt_path" in config:
if "example_prompt" in config:
raise ValueError(
"Only one of example_prompt and example_prompt_path should "
"be specified."
)
config["example_prompt"] = load_prompt(config.pop("example_prompt_path"))
else:
config["example_prompt"] = load_prompt_from_config(config["example_prompt"])
# Load the examples.
config = _load_examples(config)
config = _load_output_parser(config)
return FewShotPromptTemplate(**config)
def _load_prompt(config: dict) -> PromptTemplate:
"""Load the prompt template from config."""
# Load the template from disk if necessary.
config = _load_template("template", config)
config = _load_output_parser(config)
template_format = config.get("template_format", "f-string")
if template_format == "jinja2":
# Disabled due to:
# https://github.com/langchain-ai/langchain/issues/4394
raise ValueError(
f"Loading templates with '{template_format}' format is no longer supported "
f"since it can lead to arbitrary code execution. Please migrate to using "
f"the 'f-string' template format, which does not suffer from this issue."
)
return PromptTemplate(**config)
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
type_to_loader_dict: Dict[str, Callable[[dict], BasePromptTemplate]] = {
"prompt": _load_prompt,
"few_shot": _load_few_shot_prompt,
}
|
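Editorial illustration (not part of the dataset row above): the error reported in issue 13,667 falls directly out of the pre-fix loader shown above, since "chat" is not a key of type_to_loader_dict. A minimal sketch of the failure path, using only config keys shown in the issue:

```python
# Minimal failure sketch against the pre-fix loading.py shown above.
from langchain_core.prompts.loading import load_prompt_from_config

chat_config = {
    "_type": "chat",                             # what ChatPromptTemplate serialization emits
    "input_variables": ["question", "context"],
    "messages": [],                              # trimmed; the real dump nests per-message prompt configs
}
load_prompt_from_config(chat_config)             # raises ValueError: Loading chat prompt not supported
```

The linked PR presumably registers a loader for the "chat" type; the exact implementation is whatever landed in the referenced after-fix commit.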
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,407 | RunnableLambda: returned runnable called synchronously when using ainvoke | ### System Info
langchain on master branch
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [ ] Callbacks/Tracing
- [X] Async
### Reproduction
```
from langchain.schema.runnable import RunnableLambda
import asyncio
def idchain_sync(__input):
print(f'sync chain call: {__input}')
return __input
async def idchain_async(__input):
print(f'async chain call: {__input}')
return __input
idchain = RunnableLambda(func=idchain_sync,afunc=idchain_async)
def func(__input):
return idchain
asyncio.run(RunnableLambda(func).ainvoke('toto'))
# prints 'sync chain call: toto' instead of 'async chain call: toto'
```
### Expected behavior
LCEL routing (a RunnableLambda that returns another runnable) can cause chains to run synchronously even though the user called ainvoke.
When a RunnableLambda A returns a chain B and is called with ainvoke, we would expect the returned chain B to also be called with ainvoke.
However, if the function provided to RunnableLambda A is not async, chain B is called with invoke instead, silently causing the rest of the chain to run synchronously (see the sketch below).
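A minimal sketch of the dispatch we would expect (hypothetical helper name, not the actual patch): whatever the wrapped function returns, a returned `Runnable` should stay on the async path via `ainvoke`:
```python
# Hypothetical sketch (not the actual fix): keep a returned runnable on the async path.
import asyncio
from typing import Any, Optional

from langchain.schema.runnable import Runnable, RunnableConfig, RunnableLambda


async def dispatch_returned_runnable(
    output: Any, original_input: Any, config: Optional[RunnableConfig] = None
) -> Any:
    if isinstance(output, Runnable):
        # Recurse asynchronously instead of falling back to the sync invoke
        return await output.ainvoke(original_input, config)
    return output


async def main() -> None:
    chain_b = RunnableLambda(lambda x: x)  # stands in for the returned chain B
    print(await dispatch_returned_runnable(chain_b, "toto"))  # 'toto'


asyncio.run(main())
```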
| https://github.com/langchain-ai/langchain/issues/13407 | https://github.com/langchain-ai/langchain/pull/13408 | 391f200eaab9af587eea902b264f2d3af929b268 | e17edc4d0b1dccc98df2a7d1db86bfcc0eb66ca5 | "2023-11-15T17:27:49Z" | python | "2023-11-28T11:18:26Z" | libs/core/langchain_core/runnables/base.py | from __future__ import annotations
import asyncio
import inspect
import threading
from abc import ABC, abstractmethod
from concurrent.futures import FIRST_COMPLETED, wait
from functools import partial
from itertools import tee
from operator import itemgetter
from typing import (
TYPE_CHECKING,
Any,
AsyncIterator,
Awaitable,
Callable,
Dict,
Generic,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
from typing_extensions import Literal, get_args
from langchain_core.load.dump import dumpd
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import BaseModel, Field, create_model
from langchain_core.runnables.config import (
RunnableConfig,
acall_func_with_variable_args,
call_func_with_variable_args,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
get_config_list,
get_executor_for_config,
merge_configs,
patch_config,
)
from langchain_core.runnables.utils import (
AddableDict,
AnyConfigurableField,
ConfigurableField,
ConfigurableFieldSpec,
Input,
Output,
accepts_config,
accepts_run_manager,
gather_with_concurrency,
get_function_first_arg_dict_keys,
get_lambda_source,
get_unique_config_specs,
indent_lines_after_first,
)
from langchain_core.utils.aiter import atee, py_anext
from langchain_core.utils.iter import safetee
if TYPE_CHECKING:
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.runnables.fallbacks import (
RunnableWithFallbacks as RunnableWithFallbacksT,
)
from langchain_core.tracers.log_stream import RunLog, RunLogPatch
from langchain_core.tracers.root_listeners import Listener
Other = TypeVar("Other")
class Runnable(Generic[Input, Output], ABC):
"""A unit of work that can be invoked, batched, streamed, transformed and composed.
Key Methods
===========
* invoke/ainvoke: Transforms a single input into an output.
* batch/abatch: Efficiently transforms multiple inputs into outputs.
* stream/astream: Streams output from a single input as it's produced.
* astream_log: Streams output and selected intermediate results from an input.
Built-in optimizations:
* Batch: By default, batch runs invoke() in parallel using a thread pool executor.
Override to optimize batching.
* Async: Methods with an "a" prefix are asynchronous. By default, they execute
the sync counterpart using asyncio's thread pool.
Override for native async.
All methods accept an optional config argument, which can be used to configure
execution, add tags and metadata for tracing and debugging etc.
Runnables expose schematic information about their input, output and config via
the input_schema property, the output_schema property and config_schema method.
LCEL and Composition
====================
The LangChain Expression Language (LCEL) is a declarative way to compose Runnables
into chains. Any chain constructed this way will automatically have sync, async,
batch, and streaming support.
The main composition primitives are RunnableSequence and RunnableParallel.
RunnableSequence invokes a series of runnables sequentially, with one runnable's
output serving as the next's input. Construct using the `|` operator or by
passing a list of runnables to RunnableSequence.
RunnableParallel invokes runnables concurrently, providing the same input
to each. Construct it using a dict literal within a sequence or by passing a
dict to RunnableParallel.
For example,
.. code-block:: python
from langchain_core.runnables import RunnableLambda
# A RunnableSequence constructed using the `|` operator
sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
sequence.invoke(1) # 4
sequence.batch([1, 2, 3]) # [4, 6, 8]
# A sequence that contains a RunnableParallel constructed using a dict literal
sequence = RunnableLambda(lambda x: x + 1) | {
'mul_2': RunnableLambda(lambda x: x * 2),
'mul_5': RunnableLambda(lambda x: x * 5)
}
sequence.invoke(1) # {'mul_2': 4, 'mul_5': 10}
Standard Methods
================
All Runnables expose additional methods that can be used to modify their behavior
(e.g., add a retry policy, add lifecycle listeners, make them configurable, etc.).
These methods will work on any Runnable, including Runnable chains constructed
by composing other Runnables. See the individual methods for details.
For example,
.. code-block:: python
from langchain_core.runnables import RunnableLambda
import random
def add_one(x: int) -> int:
return x + 1
def buggy_double(y: int) -> int:
'''Buggy code that will fail 70% of the time'''
if random.random() > 0.3:
print('This code failed, and will probably be retried!')
raise ValueError('Triggered buggy code')
return y * 2
sequence = (
RunnableLambda(add_one) |
RunnableLambda(buggy_double).with_retry( # Retry on failure
stop_after_attempt=10,
wait_exponential_jitter=False
)
)
print(sequence.input_schema.schema()) # Show inferred input schema
print(sequence.output_schema.schema()) # Show inferred output schema
print(sequence.invoke(2)) # invoke the sequence (note the retry above!!)
Debugging and tracing
=====================
As the chains get longer, it can be useful to be able to see intermediate results
to debug and trace the chain.
You can set the global debug flag to True to enable debug output for all chains:
.. code-block:: python
from langchain_core.globals import set_debug
set_debug(True)
Alternatively, you can pass existing or custom callbacks to any given chain:
.. code-block:: python
from langchain_core.tracers import ConsoleCallbackHandler
chain.invoke(
...,
config={'callbacks': [ConsoleCallbackHandler()]}
)
For a UI (and much more) checkout LangSmith: https://docs.smith.langchain.com/
"""
@property
def InputType(self) -> Type[Input]:
"""The type of input this runnable accepts specified as a type annotation."""
for cls in self.__class__.__orig_bases__: # type: ignore[attr-defined]
type_args = get_args(cls)
if type_args and len(type_args) == 2:
return type_args[0]
raise TypeError(
f"Runnable {self.__class__.__name__} doesn't have an inferable InputType. "
"Override the InputType property to specify the input type."
)
@property
def OutputType(self) -> Type[Output]:
"""The type of output this runnable produces specified as a type annotation."""
for cls in self.__class__.__orig_bases__: # type: ignore[attr-defined]
type_args = get_args(cls)
if type_args and len(type_args) == 2:
return type_args[1]
raise TypeError(
f"Runnable {self.__class__.__name__} doesn't have an inferable OutputType. "
"Override the OutputType property to specify the output type."
)
@property
def input_schema(self) -> Type[BaseModel]:
"""The type of input this runnable accepts specified as a pydantic model."""
return self.get_input_schema()
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
"""Get a pydantic model that can be used to validate input to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic input schema that depends on which
configuration the runnable is invoked with.
This method allows to get an input schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate input.
"""
root_type = self.InputType
if inspect.isclass(root_type) and issubclass(root_type, BaseModel):
return root_type
return create_model(
self.__class__.__name__ + "Input", __root__=(root_type, None)
)
@property
def output_schema(self) -> Type[BaseModel]:
"""The type of output this runnable produces specified as a pydantic model."""
return self.get_output_schema()
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
"""Get a pydantic model that can be used to validate output to the runnable.
Runnables that leverage the configurable_fields and configurable_alternatives
methods will have a dynamic output schema that depends on which
configuration the runnable is invoked with.
This method allows to get an output schema for a specific configuration.
Args:
config: A config to use when generating the schema.
Returns:
A pydantic model that can be used to validate output.
"""
root_type = self.OutputType
if inspect.isclass(root_type) and issubclass(root_type, BaseModel):
return root_type
return create_model(
self.__class__.__name__ + "Output", __root__=(root_type, None)
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
"""List configurable fields for this runnable."""
return []
def config_schema(
self, *, include: Optional[Sequence[str]] = None
) -> Type[BaseModel]:
"""The type of config this runnable accepts specified as a pydantic model.
To mark a field as configurable, see the `configurable_fields`
and `configurable_alternatives` methods.
Args:
include: A list of fields to include in the config schema.
Returns:
A pydantic model that can be used to validate config.
"""
class _Config:
arbitrary_types_allowed = True
include = include or []
config_specs = self.config_specs
configurable = (
create_model( # type: ignore[call-overload]
"Configurable",
**{
spec.id: (
spec.annotation,
Field(
spec.default, title=spec.name, description=spec.description
),
)
for spec in config_specs
},
)
if config_specs
else None
)
return create_model( # type: ignore[call-overload]
self.__class__.__name__ + "Config",
__config__=_Config,
**({"configurable": (configurable, None)} if configurable else {}),
**{
field_name: (field_type, None)
for field_name, field_type in RunnableConfig.__annotations__.items()
if field_name in [i for i in include if i != "configurable"]
},
)
def __or__(
self,
other: Union[
Runnable[Any, Other],
Callable[[Any], Other],
Callable[[Iterator[Any]], Iterator[Other]],
Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
],
) -> RunnableSerializable[Input, Other]:
"""Compose this runnable with another object to create a RunnableSequence."""
return RunnableSequence(first=self, last=coerce_to_runnable(other))
def __ror__(
self,
other: Union[
Runnable[Other, Any],
Callable[[Other], Any],
Callable[[Iterator[Other]], Iterator[Any]],
Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]],
],
) -> RunnableSerializable[Other, Output]:
"""Compose this runnable with another object to create a RunnableSequence."""
return RunnableSequence(first=coerce_to_runnable(other), last=self)
""" --- Public API --- """
@abstractmethod
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
"""Transform a single input into an output. Override to implement.
Args:
input: The input to the runnable.
config: A config to use when invoking the runnable.
The config supports standard keys like 'tags', 'metadata' for tracing
purposes, 'max_concurrency' for controlling how much work to do
in parallel, and other keys. Please refer to the RunnableConfig
for more details.
Returns:
The output of the runnable.
"""
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""Default implementation of ainvoke, calls invoke from a thread.
The default implementation allows usage of async code even if
the runnable did not implement a native async version of invoke.
Subclasses should override this method if they can run asynchronously.
"""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.invoke, **kwargs), input, config
)
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
"""Default implementation runs invoke in parallel using a thread pool executor.
The default implementation of batch works well for IO bound runnables.
Subclasses should override this method if they can batch more efficiently;
e.g., if the underlying runnable uses an API which supports a batch mode.
"""
if not inputs:
return []
configs = get_config_list(config, len(inputs))
def invoke(input: Input, config: RunnableConfig) -> Union[Output, Exception]:
if return_exceptions:
try:
return self.invoke(input, config, **kwargs)
except Exception as e:
return e
else:
return self.invoke(input, config, **kwargs)
# If there's only one input, don't bother with the executor
if len(inputs) == 1:
return cast(List[Output], [invoke(inputs[0], configs[0])])
with get_executor_for_config(configs[0]) as executor:
return cast(List[Output], list(executor.map(invoke, inputs, configs)))
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
"""Default implementation runs ainvoke in parallel using asyncio.gather.
The default implementation of batch works well for IO bound runnables.
Subclasses should override this method if they can batch more efficiently;
e.g., if the underlying runnable uses an API which supports a batch mode.
"""
if not inputs:
return []
configs = get_config_list(config, len(inputs))
async def ainvoke(
input: Input, config: RunnableConfig
) -> Union[Output, Exception]:
if return_exceptions:
try:
return await self.ainvoke(input, config, **kwargs)
except Exception as e:
return e
else:
return await self.ainvoke(input, config, **kwargs)
coros = map(ainvoke, inputs, configs)
return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
"""
Default implementation of stream, which calls invoke.
Subclasses should override this method if they support streaming output.
"""
yield self.invoke(input, config, **kwargs)
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
"""
Default implementation of astream, which calls ainvoke.
Subclasses should override this method if they support streaming output.
"""
yield await self.ainvoke(input, config, **kwargs)
@overload
def astream_log(
self,
input: Any,
config: Optional[RunnableConfig] = None,
*,
diff: Literal[True] = True,
include_names: Optional[Sequence[str]] = None,
include_types: Optional[Sequence[str]] = None,
include_tags: Optional[Sequence[str]] = None,
exclude_names: Optional[Sequence[str]] = None,
exclude_types: Optional[Sequence[str]] = None,
exclude_tags: Optional[Sequence[str]] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[RunLogPatch]:
...
@overload
def astream_log(
self,
input: Any,
config: Optional[RunnableConfig] = None,
*,
diff: Literal[False],
include_names: Optional[Sequence[str]] = None,
include_types: Optional[Sequence[str]] = None,
include_tags: Optional[Sequence[str]] = None,
exclude_names: Optional[Sequence[str]] = None,
exclude_types: Optional[Sequence[str]] = None,
exclude_tags: Optional[Sequence[str]] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[RunLog]:
...
async def astream_log(
self,
input: Any,
config: Optional[RunnableConfig] = None,
*,
diff: bool = True,
include_names: Optional[Sequence[str]] = None,
include_types: Optional[Sequence[str]] = None,
include_tags: Optional[Sequence[str]] = None,
exclude_names: Optional[Sequence[str]] = None,
exclude_types: Optional[Sequence[str]] = None,
exclude_tags: Optional[Sequence[str]] = None,
**kwargs: Optional[Any],
) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]:
"""
Stream all output from a runnable, as reported to the callback system.
This includes all inner runs of LLMs, Retrievers, Tools, etc.
Output is streamed as Log objects, which include a list of
jsonpatch ops that describe how the state of the run has changed in each
step, and the final state of the run.
The jsonpatch ops can be applied in order to construct state.
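Example (a minimal sketch; requires an async context):
.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    chain = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)

    async for patch in chain.astream_log(1):
        # Each patch is a RunLogPatch; apply successive patches
        # (or pass diff=False) to materialize the full run state.
        print(patch)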
"""
from langchain_core.callbacks.base import BaseCallbackManager
from langchain_core.tracers.log_stream import (
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
)
# Create a stream handler that will emit Log objects
stream = LogStreamCallbackHandler(
auto_close=False,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_names=exclude_names,
exclude_types=exclude_types,
exclude_tags=exclude_tags,
)
# Assign the stream handler to the config
config = config or {}
callbacks = config.get("callbacks")
if callbacks is None:
config["callbacks"] = [stream]
elif isinstance(callbacks, list):
config["callbacks"] = callbacks + [stream]
elif isinstance(callbacks, BaseCallbackManager):
callbacks = callbacks.copy()
callbacks.add_handler(stream, inherit=True)
config["callbacks"] = callbacks
else:
raise ValueError(
f"Unexpected type for callbacks: {callbacks}."
"Expected None, list or AsyncCallbackManager."
)
# Call the runnable in streaming mode,
# add each chunk to the output stream
async def consume_astream() -> None:
try:
async for chunk in self.astream(input, config, **kwargs):
await stream.send_stream.send(
RunLogPatch(
{
"op": "add",
"path": "/streamed_output/-",
"value": chunk,
}
)
)
finally:
await stream.send_stream.aclose()
# Start the runnable in a task, so we can start consuming output
task = asyncio.create_task(consume_astream())
try:
# Yield each chunk from the output stream
if diff:
async for log in stream:
yield log
else:
state = RunLog(state=None) # type: ignore[arg-type]
async for log in stream:
state = state + log
yield state
finally:
# Wait for the runnable to finish, if not cancelled (eg. by break)
try:
await task
except asyncio.CancelledError:
pass
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
"""
Default implementation of transform, which buffers input and then calls stream.
Subclasses should override this method if they can start producing output while
input is still being generated.
"""
final: Input
got_first_val = False
for chunk in input:
if not got_first_val:
final = chunk
got_first_val = True
else:
# Make a best effort to gather, for any type that supports `+`
# This method should throw an error if gathering fails.
final = final + chunk # type: ignore[operator]
if got_first_val:
yield from self.stream(final, config, **kwargs)
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
"""
Default implementation of atransform, which buffers input and calls astream.
Subclasses should override this method if they can start producing output while
input is still being generated.
"""
final: Input
got_first_val = False
async for chunk in input:
if not got_first_val:
final = chunk
got_first_val = True
else:
# Make a best effort to gather, for any type that supports `+`
# This method should throw an error if gathering fails.
final = final + chunk # type: ignore[operator]
if got_first_val:
async for output in self.astream(final, config, **kwargs):
yield output
def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
"""
Bind arguments to a Runnable, returning a new Runnable.
"""
return RunnableBinding(bound=self, kwargs=kwargs, config={})
def with_config(
self,
config: Optional[RunnableConfig] = None,
# Sadly Unpack is not well supported by mypy so this will have to be untyped
**kwargs: Any,
) -> Runnable[Input, Output]:
"""
Bind config to a Runnable, returning a new Runnable.
"""
return RunnableBinding(
bound=self,
config=cast(
RunnableConfig,
{**(config or {}), **kwargs},
), # type: ignore[misc]
kwargs={},
)
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
) -> Runnable[Input, Output]:
"""
Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
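Example (a minimal sketch; the listeners receive the Run object):
.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    def log_start(run):
        print("run started:", run.id)

    def log_end(run):
        print("run finished:", run.id)

    chain = RunnableLambda(lambda x: x + 1).with_listeners(
        on_start=log_start, on_end=log_end
    )
    chain.invoke(1)  # 2, with the listeners printing the run id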
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
return RunnableBinding(
bound=self,
config_factories=[
lambda config: {
"callbacks": [
RootListenersTracer(
config=config,
on_start=on_start,
on_end=on_end,
on_error=on_error,
)
],
}
],
)
def with_types(
self,
*,
input_type: Optional[Type[Input]] = None,
output_type: Optional[Type[Output]] = None,
) -> Runnable[Input, Output]:
"""
Bind input and output types to a Runnable, returning a new Runnable.
"""
return RunnableBinding(
bound=self,
custom_input_type=input_type,
custom_output_type=output_type,
kwargs={},
)
def with_retry(
self,
*,
retry_if_exception_type: Tuple[Type[BaseException], ...] = (Exception,),
wait_exponential_jitter: bool = True,
stop_after_attempt: int = 3,
) -> Runnable[Input, Output]:
"""Create a new Runnable that retries the original runnable on exceptions.
Args:
retry_if_exception_type: A tuple of exception types to retry on
wait_exponential_jitter: Whether to add jitter to the wait time
between retries
stop_after_attempt: The maximum number of attempts to make before giving up
Returns:
A new Runnable that retries the original runnable on exceptions.
"""
from langchain_core.runnables.retry import RunnableRetry
return RunnableRetry(
bound=self,
kwargs={},
config={},
retry_exception_types=retry_if_exception_type,
wait_exponential_jitter=wait_exponential_jitter,
max_attempt_number=stop_after_attempt,
)
def map(self) -> Runnable[List[Input], List[Output]]:
"""
Return a new Runnable that maps a list of inputs to a list of outputs,
by calling invoke() with each input.
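Example (a minimal sketch):
.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    add_one = RunnableLambda(lambda x: x + 1)
    add_one.map().invoke([1, 2, 3])  # [2, 3, 4]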
"""
return RunnableEach(bound=self)
def with_fallbacks(
self,
fallbacks: Sequence[Runnable[Input, Output]],
*,
exceptions_to_handle: Tuple[Type[BaseException], ...] = (Exception,),
) -> RunnableWithFallbacksT[Input, Output]:
"""Add fallbacks to a runnable, returning a new Runnable.
Args:
fallbacks: A sequence of runnables to try if the original runnable fails.
exceptions_to_handle: A tuple of exception types to handle.
Returns:
A new Runnable that will try the original runnable, and then each
fallback in order, upon failures.
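Example (a minimal sketch; the fallback takes the same input as the original):
.. code-block:: python

    from langchain_core.runnables import RunnableLambda

    def might_fail(x: int) -> int:
        raise ValueError("boom")

    chain = RunnableLambda(might_fail).with_fallbacks(
        [RunnableLambda(lambda x: x * 2)]
    )
    chain.invoke(3)  # 6 -- the fallback handled the ValueError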
"""
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
return RunnableWithFallbacks(
runnable=self,
fallbacks=fallbacks,
exceptions_to_handle=exceptions_to_handle,
)
""" --- Helper methods for Subclasses --- """
def _call_with_config(
self,
func: Union[
Callable[[Input], Output],
Callable[[Input, CallbackManagerForChainRun], Output],
Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output],
],
input: Input,
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> Output:
"""Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement invoke() in subclasses."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
input,
run_type=run_type,
name=config.get("run_name"),
)
try:
output = call_func_with_variable_args(
func, input, config, run_manager, **kwargs
)
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(dumpd(output))
return output
async def _acall_with_config(
self,
func: Union[
Callable[[Input], Awaitable[Output]],
Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
Callable[
[Input, AsyncCallbackManagerForChainRun, RunnableConfig],
Awaitable[Output],
],
],
input: Input,
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> Output:
"""Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement ainvoke() in subclasses."""
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
input,
run_type=run_type,
name=config.get("run_name"),
)
try:
output = await acall_func_with_variable_args(
func, input, config, run_manager, **kwargs
)
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(dumpd(output))
return output
def _batch_with_config(
self,
func: Union[
Callable[[List[Input]], List[Union[Exception, Output]]],
Callable[
[List[Input], List[CallbackManagerForChainRun]],
List[Union[Exception, Output]],
],
Callable[
[List[Input], List[CallbackManagerForChainRun], List[RunnableConfig]],
List[Union[Exception, Output]],
],
],
input: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> List[Output]:
"""Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement invoke() in subclasses."""
if not input:
return []
configs = get_config_list(config, len(input))
callback_managers = [get_callback_manager_for_config(c) for c in configs]
run_managers = [
callback_manager.on_chain_start(
dumpd(self),
input,
run_type=run_type,
name=config.get("run_name"),
)
for callback_manager, input, config in zip(
callback_managers, input, configs
)
]
try:
if accepts_config(func):
kwargs["config"] = [
patch_config(c, callbacks=rm.get_child())
for c, rm in zip(configs, run_managers)
]
if accepts_run_manager(func):
kwargs["run_manager"] = run_managers
output = func(input, **kwargs) # type: ignore[call-arg]
except BaseException as e:
for run_manager in run_managers:
run_manager.on_chain_error(e)
if return_exceptions:
return cast(List[Output], [e for _ in input])
else:
raise
else:
first_exception: Optional[Exception] = None
for run_manager, out in zip(run_managers, output):
if isinstance(out, Exception):
first_exception = first_exception or out
run_manager.on_chain_error(out)
else:
run_manager.on_chain_end(dumpd(out))
if return_exceptions or first_exception is None:
return cast(List[Output], output)
else:
raise first_exception
async def _abatch_with_config(
self,
func: Union[
Callable[[List[Input]], Awaitable[List[Union[Exception, Output]]]],
Callable[
[List[Input], List[AsyncCallbackManagerForChainRun]],
Awaitable[List[Union[Exception, Output]]],
],
Callable[
[
List[Input],
List[AsyncCallbackManagerForChainRun],
List[RunnableConfig],
],
Awaitable[List[Union[Exception, Output]]],
],
],
input: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> List[Output]:
"""Helper method to transform an Input value to an Output value,
with callbacks. Use this method to implement invoke() in subclasses."""
if not input:
return []
configs = get_config_list(config, len(input))
callback_managers = [get_async_callback_manager_for_config(c) for c in configs]
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
callback_manager.on_chain_start(
dumpd(self),
input,
run_type=run_type,
name=config.get("run_name"),
)
for callback_manager, input, config in zip(
callback_managers, input, configs
)
)
)
try:
if accepts_config(func):
kwargs["config"] = [
patch_config(c, callbacks=rm.get_child())
for c, rm in zip(configs, run_managers)
]
if accepts_run_manager(func):
kwargs["run_manager"] = run_managers
output = await func(input, **kwargs) # type: ignore[call-arg]
except BaseException as e:
await asyncio.gather(
*(run_manager.on_chain_error(e) for run_manager in run_managers)
)
if return_exceptions:
return cast(List[Output], [e for _ in input])
else:
raise
else:
first_exception: Optional[Exception] = None
coros: List[Awaitable[None]] = []
for run_manager, out in zip(run_managers, output):
if isinstance(out, Exception):
first_exception = first_exception or out
coros.append(run_manager.on_chain_error(out))
else:
coros.append(run_manager.on_chain_end(dumpd(out)))
await asyncio.gather(*coros)
if return_exceptions or first_exception is None:
return cast(List[Output], output)
else:
raise first_exception
def _transform_stream_with_config(
self,
input: Iterator[Input],
transformer: Union[
Callable[[Iterator[Input]], Iterator[Output]],
Callable[[Iterator[Input], CallbackManagerForChainRun], Iterator[Output]],
Callable[
[
Iterator[Input],
CallbackManagerForChainRun,
RunnableConfig,
],
Iterator[Output],
],
],
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
"""Helper method to transform an Iterator of Input values into an Iterator of
Output values, with callbacks.
Use this to implement `stream()` or `transform()` in Runnable subclasses."""
# tee the input so we can iterate over it twice
input_for_tracing, input_for_transform = tee(input, 2)
# Start the input iterator to ensure the input runnable starts before this one
final_input: Optional[Input] = next(input_for_tracing, None)
final_input_supported = True
final_output: Optional[Output] = None
final_output_supported = True
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
dumpd(self),
{"input": ""},
run_type=run_type,
name=config.get("run_name"),
)
try:
if accepts_config(transformer):
kwargs["config"] = patch_config(
config, callbacks=run_manager.get_child()
)
if accepts_run_manager(transformer):
kwargs["run_manager"] = run_manager
iterator = transformer(input_for_transform, **kwargs) # type: ignore[call-arg]
for chunk in iterator:
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore
except TypeError:
final_output = None
final_output_supported = False
for ichunk in input_for_tracing:
if final_input_supported:
if final_input is None:
final_input = ichunk
else:
try:
final_input = final_input + ichunk # type: ignore
except TypeError:
final_input = None
final_input_supported = False
except BaseException as e:
run_manager.on_chain_error(e, inputs=final_input)
raise
else:
run_manager.on_chain_end(final_output, inputs=final_input)
async def _atransform_stream_with_config(
self,
input: AsyncIterator[Input],
transformer: Union[
Callable[[AsyncIterator[Input]], AsyncIterator[Output]],
Callable[
[AsyncIterator[Input], AsyncCallbackManagerForChainRun],
AsyncIterator[Output],
],
Callable[
[
AsyncIterator[Input],
AsyncCallbackManagerForChainRun,
RunnableConfig,
],
AsyncIterator[Output],
],
],
config: Optional[RunnableConfig],
run_type: Optional[str] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
"""Helper method to transform an Async Iterator of Input values into an Async
Iterator of Output values, with callbacks.
Use this to implement `astream()` or `atransform()` in Runnable subclasses."""
# tee the input so we can iterate over it twice
input_for_tracing, input_for_transform = atee(input, 2)
# Start the input iterator to ensure the input runnable starts before this one
final_input: Optional[Input] = await py_anext(input_for_tracing, None)
final_input_supported = True
final_output: Optional[Output] = None
final_output_supported = True
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
run_manager = await callback_manager.on_chain_start(
dumpd(self),
{"input": ""},
run_type=run_type,
name=config.get("run_name"),
)
try:
if accepts_config(transformer):
kwargs["config"] = patch_config(
config, callbacks=run_manager.get_child()
)
if accepts_run_manager(transformer):
kwargs["run_manager"] = run_manager
iterator = transformer(input_for_transform, **kwargs) # type: ignore[call-arg]
async for chunk in iterator:
yield chunk
if final_output_supported:
if final_output is None:
final_output = chunk
else:
try:
final_output = final_output + chunk # type: ignore
except TypeError:
final_output = None
final_output_supported = False
async for ichunk in input_for_tracing:
if final_input_supported:
if final_input is None:
final_input = ichunk
else:
try:
final_input = final_input + ichunk # type: ignore[operator]
except TypeError:
final_input = None
final_input_supported = False
except BaseException as e:
await run_manager.on_chain_error(e, inputs=final_input)
raise
else:
await run_manager.on_chain_end(final_output, inputs=final_input)
class RunnableSerializable(Serializable, Runnable[Input, Output]):
"""A Runnable that can be serialized to JSON."""
def configurable_fields(
self, **kwargs: AnyConfigurableField
) -> RunnableSerializable[Input, Output]:
from langchain_core.runnables.configurable import RunnableConfigurableFields
for key in kwargs:
if key not in self.__fields__:
raise ValueError(
f"Configuration key {key} not found in {self}: "
"available keys are {self.__fields__.keys()}"
)
return RunnableConfigurableFields(default=self, fields=kwargs)
def configurable_alternatives(
self,
which: ConfigurableField,
*,
default_key: str = "default",
prefix_keys: bool = False,
**kwargs: Union[Runnable[Input, Output], Callable[[], Runnable[Input, Output]]],
) -> RunnableSerializable[Input, Output]:
from langchain_core.runnables.configurable import (
RunnableConfigurableAlternatives,
)
return RunnableConfigurableAlternatives(
which=which,
default=self,
alternatives=kwargs,
default_key=default_key,
prefix_keys=prefix_keys,
)
class RunnableSequence(RunnableSerializable[Input, Output]):
"""A sequence of runnables, where the output of each is the input of the next.
RunnableSequence is the most important composition operator in LangChain as it is
used in virtually every chain.
A RunnableSequence can be instantiated directly or more commonly by using the `|`
operator where either the left or right operands (or both) must be a Runnable.
Any RunnableSequence automatically supports sync, async, batch.
The default implementations of `batch` and `abatch` utilize threadpools and
asyncio gather and will be faster than naive invocation of invoke or ainvoke
for IO bound runnables.
Batching is implemented by invoking the batch method on each component of the
RunnableSequence in order.
A RunnableSequence preserves the streaming properties of its components, so if all
components of the sequence implement a `transform` method -- which
is the method that implements the logic to map a streaming input to a streaming
output -- then the sequence will be able to stream input to output!
If any component of the sequence does not implement transform then the
streaming will only begin after this component is run. If there are
multiple blocking components, streaming begins after the last one.
Please note: RunnableLambdas do not support `transform` by default! So if
you need to use a RunnableLambda, be careful about where you place it in a
RunnableSequence (if you need to use the .stream()/.astream() methods).
If you need arbitrary logic and need streaming, you can subclass
Runnable, and implement `transform` for whatever logic you need.
Here is a simple example that uses simple functions to illustrate the use of
RunnableSequence:
.. code-block:: python
from langchain_core.runnables import RunnableLambda
def add_one(x: int) -> int:
return x + 1
def mul_two(x: int) -> int:
return x * 2
runnable_1 = RunnableLambda(add_one)
runnable_2 = RunnableLambda(mul_two)
sequence = runnable_1 | runnable_2
# Or equivalently:
# sequence = RunnableSequence(first=runnable_1, last=runnable_2)
sequence.invoke(1)
await sequence.ainvoke(1)
sequence.batch([1, 2, 3])
await sequence.abatch([1, 2, 3])
Here's an example that streams JSON output generated by an LLM:
.. code-block:: python
from langchain_core.output_parsers.json import SimpleJsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
prompt = PromptTemplate.from_template(
'In JSON format, give me a list of {topic} and their '
'corresponding names in French, Spanish and in a '
'Cat Language.'
)
model = ChatOpenAI()
chain = prompt | model | SimpleJsonOutputParser()
async for chunk in chain.astream({'topic': 'colors'}):
print('-')
print(chunk, sep='', flush=True)
"""
# The steps are broken into first, middle and last, solely for type checking
# purposes. It allows specifying the `Input` on the first type, the `Output` of
# the last type.
first: Runnable[Input, Any]
"""The first runnable in the sequence."""
middle: List[Runnable[Any, Any]] = Field(default_factory=list)
"""The middle runnables in the sequence."""
last: Runnable[Any, Output]
"""The last runnable in the sequence."""
@property
def steps(self) -> List[Runnable[Any, Any]]:
"""All the runnables that make up the sequence in order."""
return [self.first] + self.middle + [self.last]
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Type[Input]:
return self.first.InputType
@property
def OutputType(self) -> Type[Output]:
return self.last.OutputType
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
from langchain_core.runnables.passthrough import RunnableAssign
if isinstance(self.first, RunnableAssign):
first = cast(RunnableAssign, self.first)
next_ = self.middle[0] if self.middle else self.last
next_input_schema = next_.get_input_schema(config)
if not next_input_schema.__custom_root_type__:
# it's a dict as expected
return create_model( # type: ignore[call-overload]
"RunnableSequenceInput",
**{
k: (v.annotation, v.default)
for k, v in next_input_schema.__fields__.items()
if k not in first.mapper.steps
},
)
return self.first.get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return self.last.get_output_schema(config)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec for step in self.steps for spec in step.config_specs
)
def __repr__(self) -> str:
return "\n| ".join(
repr(s) if i == 0 else indent_lines_after_first(repr(s), "| ")
for i, s in enumerate(self.steps)
)
def __or__(
self,
other: Union[
Runnable[Any, Other],
Callable[[Any], Other],
Callable[[Iterator[Any]], Iterator[Other]],
Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
],
) -> RunnableSerializable[Input, Other]:
if isinstance(other, RunnableSequence):
return RunnableSequence(
first=self.first,
middle=self.middle + [self.last] + [other.first] + other.middle,
last=other.last,
)
else:
return RunnableSequence(
first=self.first,
middle=self.middle + [self.last],
last=coerce_to_runnable(other),
)
def __ror__(
self,
other: Union[
Runnable[Other, Any],
Callable[[Other], Any],
Callable[[Iterator[Other]], Iterator[Any]],
Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any], Any]],
],
) -> RunnableSerializable[Other, Output]:
if isinstance(other, RunnableSequence):
return RunnableSequence(
first=other.first,
middle=other.middle + [other.last] + [self.first] + self.middle,
last=self.last,
)
else:
return RunnableSequence(
first=coerce_to_runnable(other),
middle=[self.first] + self.middle,
last=self.last,
)
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
# invoke all steps in sequence
try:
for i, step in enumerate(self.steps):
input = step.invoke(
input,
# mark each step as a child run
patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
),
)
# finish the root run
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(input)
return cast(Output, input)
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
# invoke all steps in sequence
try:
for i, step in enumerate(self.steps):
input = await step.ainvoke(
input,
# mark each step as a child run
patch_config(
config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
),
)
# finish the root run
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(input)
return cast(Output, input)
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain_core.callbacks.manager import CallbackManager
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers = [
cm.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
]
# invoke
try:
if return_exceptions:
# Track which inputs (by index) failed so far
# If an input has failed it will be present in this map,
# and the value will be the exception that was raised.
failed_inputs_map: Dict[int, Exception] = {}
for stepidx, step in enumerate(self.steps):
# Assemble the original indexes of the remaining inputs
# (i.e. the ones that haven't failed yet)
remaining_idxs = [
i for i in range(len(configs)) if i not in failed_inputs_map
]
# Invoke the step on the remaining inputs
inputs = step.batch(
[
inp
for i, inp in zip(remaining_idxs, inputs)
if i not in failed_inputs_map
],
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
],
return_exceptions=return_exceptions,
**kwargs,
)
# If an input failed, add it to the map
for i, inp in zip(remaining_idxs, inputs):
if isinstance(inp, Exception):
failed_inputs_map[i] = inp
inputs = [inp for inp in inputs if not isinstance(inp, Exception)]
# If all inputs have failed, stop processing
if len(failed_inputs_map) == len(configs):
break
# Reassemble the outputs, inserting Exceptions for failed inputs
inputs_copy = inputs.copy()
inputs = []
for i in range(len(configs)):
if i in failed_inputs_map:
inputs.append(cast(Input, failed_inputs_map[i]))
else:
inputs.append(inputs_copy.pop(0))
else:
for i, step in enumerate(self.steps):
inputs = step.batch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i+1}")
)
for rm, config in zip(run_managers, configs)
],
)
# finish the root runs
except BaseException as e:
for rm in run_managers:
rm.on_chain_error(e)
if return_exceptions:
return cast(List[Output], [e for _ in inputs])
else:
raise
else:
first_exception: Optional[Exception] = None
for run_manager, out in zip(run_managers, inputs):
if isinstance(out, Exception):
first_exception = first_exception or out
run_manager.on_chain_error(out)
else:
run_manager.on_chain_end(dumpd(out))
if return_exceptions or first_exception is None:
return cast(List[Output], inputs)
else:
raise first_exception
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
)
if not inputs:
return []
# setup callbacks
configs = get_config_list(config, len(inputs))
callback_managers = [
AsyncCallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
for config in configs
]
# start the root runs, one per input
run_managers: List[AsyncCallbackManagerForChainRun] = await asyncio.gather(
*(
cm.on_chain_start(
dumpd(self),
input,
name=config.get("run_name"),
)
for cm, input, config in zip(callback_managers, inputs, configs)
)
)
# invoke .batch() on each step
# this uses batching optimizations in Runnable subclasses, like LLM
try:
if return_exceptions:
# Track which inputs (by index) failed so far
# If an input has failed it will be present in this map,
# and the value will be the exception that was raised.
failed_inputs_map: Dict[int, Exception] = {}
for stepidx, step in enumerate(self.steps):
# Assemble the original indexes of the remaining inputs
# (i.e. the ones that haven't failed yet)
remaining_idxs = [
i for i in range(len(configs)) if i not in failed_inputs_map
]
# Invoke the step on the remaining inputs
inputs = await step.abatch(
[
inp
for i, inp in zip(remaining_idxs, inputs)
if i not in failed_inputs_map
],
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{stepidx+1}")
)
for i, (rm, config) in enumerate(zip(run_managers, configs))
if i not in failed_inputs_map
],
return_exceptions=return_exceptions,
**kwargs,
)
# If an input failed, add it to the map
for i, inp in zip(remaining_idxs, inputs):
if isinstance(inp, Exception):
failed_inputs_map[i] = inp
inputs = [inp for inp in inputs if not isinstance(inp, Exception)]
# If all inputs have failed, stop processing
if len(failed_inputs_map) == len(configs):
break
# Reassemble the outputs, inserting Exceptions for failed inputs
inputs_copy = inputs.copy()
inputs = []
for i in range(len(configs)):
if i in failed_inputs_map:
inputs.append(cast(Input, failed_inputs_map[i]))
else:
inputs.append(inputs_copy.pop(0))
else:
for i, step in enumerate(self.steps):
inputs = await step.abatch(
inputs,
[
# each step a child run of the corresponding root run
patch_config(
config, callbacks=rm.get_child(f"seq:step:{i+1}")
)
for rm, config in zip(run_managers, configs)
],
)
# finish the root runs
except BaseException as e:
await asyncio.gather(*(rm.on_chain_error(e) for rm in run_managers))
if return_exceptions:
return cast(List[Output], [e for _ in inputs])
else:
raise
else:
first_exception: Optional[Exception] = None
coros: List[Awaitable[None]] = []
for run_manager, out in zip(run_managers, inputs):
if isinstance(out, Exception):
first_exception = first_exception or out
coros.append(run_manager.on_chain_error(out))
else:
coros.append(run_manager.on_chain_end(dumpd(out)))
await asyncio.gather(*coros)
if return_exceptions or first_exception is None:
return cast(List[Output], inputs)
else:
raise first_exception
def _transform(
self,
input: Iterator[Input],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
) -> Iterator[Output]:
steps = [self.first] + self.middle + [self.last]
# transform the input stream of each step with the next
# steps that don't natively support transforming an input stream will
# buffer input in memory until all available, and then start emitting output
final_pipeline = cast(Iterator[Output], input)
for step in steps:
final_pipeline = step.transform(
final_pipeline,
patch_config(
config,
callbacks=run_manager.get_child(f"seq:step:{steps.index(step)+1}"),
),
)
for output in final_pipeline:
yield output
async def _atransform(
self,
input: AsyncIterator[Input],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
) -> AsyncIterator[Output]:
steps = [self.first] + self.middle + [self.last]
# stream the last steps
# transform the input stream of each step with the next
# steps that don't natively support transforming an input stream will
# buffer input in memory until all available, and then start emitting output
final_pipeline = cast(AsyncIterator[Output], input)
for step in steps:
final_pipeline = step.atransform(
final_pipeline,
patch_config(
config,
callbacks=run_manager.get_child(f"seq:step:{steps.index(step)+1}"),
),
)
async for output in final_pipeline:
yield output
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
yield from self._transform_stream_with_config(
input, self._transform, config, **kwargs
)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
yield from self.transform(iter([input]), config, **kwargs)
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
):
yield chunk
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
async for chunk in self.atransform(input_aiter(), config, **kwargs):
yield chunk
class RunnableParallel(RunnableSerializable[Input, Dict[str, Any]]):
"""
A runnable that runs a mapping of runnables in parallel,
and returns a mapping of their outputs.
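Example (a minimal sketch):
.. code-block:: python

    from langchain_core.runnables import RunnableLambda, RunnableParallel

    runnable = RunnableParallel(
        add_one=RunnableLambda(lambda x: x + 1),
        mul_two=RunnableLambda(lambda x: x * 2),
    )
    runnable.invoke(1)  # {'add_one': 2, 'mul_two': 2}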
"""
steps: Mapping[str, Runnable[Input, Any]]
def __init__(
self,
__steps: Optional[
Mapping[
str,
Union[
Runnable[Input, Any],
Callable[[Input], Any],
Mapping[str, Union[Runnable[Input, Any], Callable[[Input], Any]]],
],
]
] = None,
**kwargs: Union[
Runnable[Input, Any],
Callable[[Input], Any],
Mapping[str, Union[Runnable[Input, Any], Callable[[Input], Any]]],
],
) -> None:
merged = {**__steps} if __steps is not None else {}
merged.update(kwargs)
super().__init__(
steps={key: coerce_to_runnable(r) for key, r in merged.items()}
)
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Any:
for step in self.steps.values():
if step.InputType:
return step.InputType
return Any
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
if all(
s.get_input_schema(config).schema().get("type", "object") == "object"
for s in self.steps.values()
):
# This is correct, but pydantic typings/mypy don't think so.
return create_model( # type: ignore[call-overload]
"RunnableParallelInput",
**{
k: (v.annotation, v.default)
for step in self.steps.values()
for k, v in step.get_input_schema(config).__fields__.items()
if k != "__root__"
},
)
return super().get_input_schema(config)
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
# This is correct, but pydantic typings/mypy don't think so.
return create_model( # type: ignore[call-overload]
"RunnableParallelOutput",
**{k: (v.OutputType, None) for k, v in self.steps.items()},
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return get_unique_config_specs(
spec for step in self.steps.values() for spec in step.config_specs
)
def __repr__(self) -> str:
map_for_repr = ",\n ".join(
f"{k}: {indent_lines_after_first(repr(v), ' ' + k + ': ')}"
for k, v in self.steps.items()
)
return "{\n " + map_for_repr + "\n}"
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None
) -> Dict[str, Any]:
from langchain_core.callbacks.manager import CallbackManager
# setup callbacks
config = ensure_config(config)
callback_manager = CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
# start the root run
run_manager = callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
# gather results from all steps
try:
# copy to avoid issues from the caller mutating the steps during invoke()
steps = dict(self.steps)
with get_executor_for_config(config) as executor:
futures = [
executor.submit(
step.invoke,
input,
# mark each step as a child run
patch_config(
config,
callbacks=run_manager.get_child(f"map:key:{key}"),
),
)
for key, step in steps.items()
]
output = {key: future.result() for key, future in zip(steps, futures)}
# finish the root run
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(output)
return output
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Dict[str, Any]:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
dumpd(self), input, name=config.get("run_name")
)
# gather results from all steps
try:
# copy to avoid issues from the caller mutating the steps during invoke()
steps = dict(self.steps)
results = await asyncio.gather(
*(
step.ainvoke(
input,
# mark each step as a child run
patch_config(
config, callbacks=run_manager.get_child(f"map:key:{key}")
),
)
for key, step in steps.items()
)
)
output = {key: value for key, value in zip(steps, results)}
# finish the root run
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(output)
return output
def _transform(
self,
input: Iterator[Input],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
) -> Iterator[AddableDict]:
# Shallow copy steps to ignore mutations while in progress
steps = dict(self.steps)
# Each step gets a copy of the input iterator,
# which is consumed in parallel in a separate thread.
input_copies = list(safetee(input, len(steps), lock=threading.Lock()))
with get_executor_for_config(config) as executor:
# Create the transform() generator for each step
named_generators = [
(
name,
step.transform(
input_copies.pop(),
patch_config(
config, callbacks=run_manager.get_child(f"map:key:{name}")
),
),
)
for name, step in steps.items()
]
# Start the first iteration of each generator
futures = {
executor.submit(next, generator): (step_name, generator)
for step_name, generator in named_generators
}
# Yield chunks from each as they become available,
# and start the next iteration of that generator that yielded it.
# When all generators are exhausted, stop.
while futures:
completed_futures, _ = wait(futures, return_when=FIRST_COMPLETED)
for future in completed_futures:
(step_name, generator) = futures.pop(future)
try:
chunk = AddableDict({step_name: future.result()})
yield chunk
futures[executor.submit(next, generator)] = (
step_name,
generator,
)
except StopIteration:
pass
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Dict[str, Any]]:
yield from self._transform_stream_with_config(
input, self._transform, config, **kwargs
)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Dict[str, Any]]:
yield from self.transform(iter([input]), config)
async def _atransform(
self,
input: AsyncIterator[Input],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
) -> AsyncIterator[AddableDict]:
# Shallow copy steps to ignore mutations while in progress
steps = dict(self.steps)
# Each step gets a copy of the input iterator,
# which is consumed in parallel by a separate asyncio task.
input_copies = list(atee(input, len(steps), lock=asyncio.Lock()))
# Create the transform() generator for each step
named_generators = [
(
name,
step.atransform(
input_copies.pop(),
patch_config(
config, callbacks=run_manager.get_child(f"map:key:{name}")
),
),
)
for name, step in steps.items()
]
# Wrap in a coroutine to satisfy linter
async def get_next_chunk(generator: AsyncIterator) -> Optional[Output]:
return await py_anext(generator)
# Start the first iteration of each generator
tasks = {
asyncio.create_task(get_next_chunk(generator)): (step_name, generator)
for step_name, generator in named_generators
}
# Yield chunks from each as they become available,
# and start the next iteration of the generator that yielded it.
# When all generators are exhausted, stop.
while tasks:
completed_tasks, _ = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
for task in completed_tasks:
(step_name, generator) = tasks.pop(task)
try:
chunk = AddableDict({step_name: task.result()})
yield chunk
new_task = asyncio.create_task(get_next_chunk(generator))
tasks[new_task] = (step_name, generator)
except StopAsyncIteration:
pass
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Dict[str, Any]]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
):
yield chunk
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Dict[str, Any]]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
async for chunk in self.atransform(input_aiter(), config):
yield chunk
# We support both names
RunnableMap = RunnableParallel
class RunnableGenerator(Runnable[Input, Output]):
"""
A runnable that runs a generator function.
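    Example (an illustrative sketch; the wrapped generator and its chunk type are
    assumptions for demonstration, not part of the class contract):
    .. code-block:: python
        from typing import Iterator
        from langchain_core.runnables import RunnableGenerator
        def gen(input: Iterator[str]) -> Iterator[str]:
            # Re-emit each incoming chunk with a prefix, preserving streaming.
            for chunk in input:
                yield "echo: " + chunk
        runnable = RunnableGenerator(gen)
        runnable.invoke("hi")        # "echo: hi"
        list(runnable.stream("hi"))  # ["echo: hi"]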
"""
def __init__(
self,
transform: Union[
Callable[[Iterator[Input]], Iterator[Output]],
Callable[[AsyncIterator[Input]], AsyncIterator[Output]],
],
atransform: Optional[
Callable[[AsyncIterator[Input]], AsyncIterator[Output]]
] = None,
) -> None:
if atransform is not None:
self._atransform = atransform
if inspect.isasyncgenfunction(transform):
self._atransform = transform
elif inspect.isgeneratorfunction(transform):
self._transform = transform
else:
raise TypeError(
"Expected a generator function type for `transform`."
f"Instead got an unsupported type: {type(transform)}"
)
@property
def InputType(self) -> Any:
func = getattr(self, "_transform", None) or getattr(self, "_atransform")
try:
params = inspect.signature(func).parameters
first_param = next(iter(params.values()), None)
if first_param and first_param.annotation != inspect.Parameter.empty:
return getattr(first_param.annotation, "__args__", (Any,))[0]
else:
return Any
except ValueError:
return Any
@property
def OutputType(self) -> Any:
func = getattr(self, "_transform", None) or getattr(self, "_atransform")
try:
sig = inspect.signature(func)
return (
getattr(sig.return_annotation, "__args__", (Any,))[0]
if sig.return_annotation != inspect.Signature.empty
else Any
)
except ValueError:
return Any
def __eq__(self, other: Any) -> bool:
if isinstance(other, RunnableGenerator):
if hasattr(self, "_transform") and hasattr(other, "_transform"):
return self._transform == other._transform
elif hasattr(self, "_atransform") and hasattr(other, "_atransform"):
return self._atransform == other._atransform
else:
return False
else:
return False
def __repr__(self) -> str:
return "RunnableGenerator(...)"
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Output]:
return self._transform_stream_with_config(
input, self._transform, config, **kwargs
)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Output]:
return self.transform(iter([input]), config, **kwargs)
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
final = None
for output in self.stream(input, config, **kwargs):
if final is None:
final = output
else:
final = final + output
return cast(Output, final)
def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Output]:
if not hasattr(self, "_atransform"):
raise NotImplementedError("This runnable does not support async methods.")
return self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
)
def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Output]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
return self.atransform(input_aiter(), config, **kwargs)
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
final = None
async for output in self.astream(input, config, **kwargs):
if final is None:
final = output
else:
final = final + output
return cast(Output, final)
class RunnableLambda(Runnable[Input, Output]):
"""RunnableLambda converts a python callable into a Runnable.
Wrapping a callable in a RunnableLambda makes the callable usable
within either a sync or async context.
RunnableLambda can be composed as any other Runnable and provides
seamless integration with LangChain tracing.
Examples:
.. code-block:: python
# This is a RunnableLambda
from langchain_core.runnables import RunnableLambda
def add_one(x: int) -> int:
return x + 1
runnable = RunnableLambda(add_one)
runnable.invoke(1) # returns 2
runnable.batch([1, 2, 3]) # returns [2, 3, 4]
# Async is supported by default by delegating to the sync implementation
await runnable.ainvoke(1) # returns 2
await runnable.abatch([1, 2, 3]) # returns [2, 3, 4]
            # Alternatively, can provide both sync and async implementations
async def add_one_async(x: int) -> int:
return x + 1
runnable = RunnableLambda(add_one, afunc=add_one_async)
runnable.invoke(1) # Uses add_one
await runnable.ainvoke(1) # Uses add_one_async
"""
def __init__(
self,
func: Union[
Union[
Callable[[Input], Output],
Callable[[Input, RunnableConfig], Output],
Callable[[Input, CallbackManagerForChainRun], Output],
Callable[[Input, CallbackManagerForChainRun, RunnableConfig], Output],
],
Union[
Callable[[Input], Awaitable[Output]],
Callable[[Input, RunnableConfig], Awaitable[Output]],
Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
Callable[
[Input, AsyncCallbackManagerForChainRun, RunnableConfig],
Awaitable[Output],
],
],
],
afunc: Optional[
Union[
Callable[[Input], Awaitable[Output]],
Callable[[Input, RunnableConfig], Awaitable[Output]],
Callable[[Input, AsyncCallbackManagerForChainRun], Awaitable[Output]],
Callable[
[Input, AsyncCallbackManagerForChainRun, RunnableConfig],
Awaitable[Output],
],
]
] = None,
) -> None:
"""Create a RunnableLambda from a callable, and async callable or both.
Accepts both sync and async variants to allow providing efficient
implementations for sync and async execution.
Args:
func: Either sync or async callable
afunc: An async callable that takes an input and returns an output.
"""
if afunc is not None:
self.afunc = afunc
if inspect.iscoroutinefunction(func):
if afunc is not None:
raise TypeError(
"Func was provided as a coroutine function, but afunc was "
"also provided. If providing both, func should be a regular "
"function to avoid ambiguity."
)
self.afunc = func
elif callable(func):
self.func = cast(Callable[[Input], Output], func)
else:
raise TypeError(
"Expected a callable type for `func`."
f"Instead got an unsupported type: {type(func)}"
)
@property
def InputType(self) -> Any:
"""The type of the input to this runnable."""
func = getattr(self, "func", None) or getattr(self, "afunc")
try:
params = inspect.signature(func).parameters
first_param = next(iter(params.values()), None)
if first_param and first_param.annotation != inspect.Parameter.empty:
return first_param.annotation
else:
return Any
except ValueError:
return Any
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
"""The pydantic schema for the input to this runnable."""
func = getattr(self, "func", None) or getattr(self, "afunc")
if isinstance(func, itemgetter):
# This is terrible, but afaict it's not possible to access _items
# on itemgetter objects, so we have to parse the repr
items = str(func).replace("operator.itemgetter(", "")[:-1].split(", ")
if all(
item[0] == "'" and item[-1] == "'" and len(item) > 2 for item in items
):
                # Every item is a quoted string key, so the input must be a dict
return create_model(
"RunnableLambdaInput",
**{item[1:-1]: (Any, None) for item in items}, # type: ignore
)
else:
return create_model("RunnableLambdaInput", __root__=(List[Any], None))
if self.InputType != Any:
return super().get_input_schema(config)
if dict_keys := get_function_first_arg_dict_keys(func):
return create_model(
"RunnableLambdaInput",
**{key: (Any, None) for key in dict_keys}, # type: ignore
)
return super().get_input_schema(config)
@property
def OutputType(self) -> Any:
"""The type of the output of this runnable as a type annotation."""
func = getattr(self, "func", None) or getattr(self, "afunc")
try:
sig = inspect.signature(func)
return (
sig.return_annotation
if sig.return_annotation != inspect.Signature.empty
else Any
)
except ValueError:
return Any
def __eq__(self, other: Any) -> bool:
if isinstance(other, RunnableLambda):
if hasattr(self, "func") and hasattr(other, "func"):
return self.func == other.func
elif hasattr(self, "afunc") and hasattr(other, "afunc"):
return self.afunc == other.afunc
else:
return False
else:
return False
def __repr__(self) -> str:
"""A string representation of this runnable."""
if hasattr(self, "func"):
return f"RunnableLambda({get_lambda_source(self.func) or '...'})"
elif hasattr(self, "afunc"):
return f"RunnableLambda(afunc={get_lambda_source(self.afunc) or '...'})"
else:
return "RunnableLambda(...)"
def _invoke(
self,
input: Input,
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> Output:
output = call_func_with_variable_args(
self.func, input, config, run_manager, **kwargs
)
# If the output is a runnable, invoke it
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
f"Recursion limit reached when invoking {self} with input {input}."
)
output = output.invoke(
input,
patch_config(
config,
callbacks=run_manager.get_child(),
recursion_limit=recursion_limit - 1,
),
)
return output
async def _ainvoke(
self,
input: Input,
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> Output:
output = await acall_func_with_variable_args(
self.afunc, input, config, run_manager, **kwargs
)
# If the output is a runnable, invoke it
if isinstance(output, Runnable):
recursion_limit = config["recursion_limit"]
if recursion_limit <= 0:
raise RecursionError(
f"Recursion limit reached when invoking {self} with input {input}."
)
output = await output.ainvoke(
input,
patch_config(
config,
callbacks=run_manager.get_child(),
recursion_limit=recursion_limit - 1,
),
)
return output
def _config(
self, config: Optional[RunnableConfig], callable: Callable[..., Any]
) -> RunnableConfig:
config = config or {}
if config.get("run_name") is None:
try:
run_name = callable.__name__
except AttributeError:
run_name = None
if run_name is not None:
return patch_config(config, run_name=run_name)
return config
def invoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
"""Invoke this runnable synchronously."""
if hasattr(self, "func"):
return self._call_with_config(
self._invoke,
input,
self._config(config, self.func),
**kwargs,
)
else:
raise TypeError(
"Cannot invoke a coroutine function synchronously."
"Use `ainvoke` instead."
)
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
"""Invoke this runnable asynchronously."""
if hasattr(self, "afunc"):
return await self._acall_with_config(
self._ainvoke,
input,
self._config(config, self.afunc),
**kwargs,
)
else:
# Delegating to super implementation of ainvoke.
# Uses asyncio executor to run the sync version (invoke)
return await super().ainvoke(input, config)
class RunnableEachBase(RunnableSerializable[List[Input], List[Output]]):
"""
A runnable that delegates calls to another runnable
with each element of the input sequence.
Use only if creating a new RunnableEach subclass with different __init__ args.
"""
bound: Runnable[Input, Output]
class Config:
arbitrary_types_allowed = True
@property
def InputType(self) -> Any:
return List[self.bound.InputType] # type: ignore[name-defined]
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
return create_model(
"RunnableEachInput",
__root__=(
List[self.bound.get_input_schema(config)], # type: ignore
None,
),
)
@property
def OutputType(self) -> Type[List[Output]]:
return List[self.bound.OutputType] # type: ignore[name-defined]
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
schema = self.bound.get_output_schema(config)
return create_model(
"RunnableEachOutput",
__root__=(
List[schema], # type: ignore
None,
),
)
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return self.bound.config_specs
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
def _invoke(
self,
inputs: List[Input],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> List[Output]:
return self.bound.batch(
inputs, patch_config(config, callbacks=run_manager.get_child()), **kwargs
)
def invoke(
self, input: List[Input], config: Optional[RunnableConfig] = None, **kwargs: Any
) -> List[Output]:
return self._call_with_config(self._invoke, input, config, **kwargs)
async def _ainvoke(
self,
inputs: List[Input],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
**kwargs: Any,
) -> List[Output]:
return await self.bound.abatch(
inputs, patch_config(config, callbacks=run_manager.get_child()), **kwargs
)
async def ainvoke(
self, input: List[Input], config: Optional[RunnableConfig] = None, **kwargs: Any
) -> List[Output]:
return await self._acall_with_config(self._ainvoke, input, config, **kwargs)
class RunnableEach(RunnableEachBase[Input, Output]):
"""
A runnable that delegates calls to another runnable
with each element of the input sequence.
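    Example (a minimal sketch; uses map(), which wraps a runnable in a RunnableEach):
    .. code-block:: python
        from langchain_core.runnables import RunnableLambda
        add_one = RunnableLambda(lambda x: x + 1)
        runnable_each = add_one.map()  # equivalent to RunnableEach(bound=add_one)
        runnable_each.invoke([1, 2, 3])  # returns [2, 3, 4]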
"""
def bind(self, **kwargs: Any) -> RunnableEach[Input, Output]:
return RunnableEach(bound=self.bound.bind(**kwargs))
def with_config(
self, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> RunnableEach[Input, Output]:
return RunnableEach(bound=self.bound.with_config(config, **kwargs))
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
) -> RunnableEach[Input, Output]:
"""
Bind lifecycle listeners to a Runnable, returning a new Runnable.
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
The Run object contains information about the run, including its id,
type, input, output, error, start_time, end_time, and any tags or metadata
added to the run.
"""
return RunnableEach(
bound=self.bound.with_listeners(
on_start=on_start, on_end=on_end, on_error=on_error
)
)
class RunnableBindingBase(RunnableSerializable[Input, Output]):
"""A runnable that delegates calls to another runnable with a set of kwargs.
Use only if creating a new RunnableBinding subclass with different __init__ args.
See documentation for RunnableBinding for more details.
"""
bound: Runnable[Input, Output]
"""The underlying runnable that this runnable delegates to."""
kwargs: Mapping[str, Any] = Field(default_factory=dict)
"""kwargs to pass to the underlying runnable when running.
For example, when the runnable binding is invoked the underlying
runnable will be invoked with the same input but with these additional
kwargs.
"""
config: RunnableConfig = Field(default_factory=dict)
"""The config to bind to the underlying runnable."""
config_factories: List[Callable[[RunnableConfig], RunnableConfig]] = Field(
default_factory=list
)
"""The config factories to bind to the underlying runnable."""
# Union[Type[Input], BaseModel] + things like List[str]
custom_input_type: Optional[Any] = None
"""Override the input type of the underlying runnable with a custom type.
The type can be a pydantic model, or a type annotation (e.g., `List[str]`).
"""
# Union[Type[Output], BaseModel] + things like List[str]
custom_output_type: Optional[Any] = None
"""Override the output type of the underlying runnable with a custom type.
The type can be a pydantic model, or a type annotation (e.g., `List[str]`).
"""
class Config:
arbitrary_types_allowed = True
def __init__(
self,
*,
bound: Runnable[Input, Output],
kwargs: Optional[Mapping[str, Any]] = None,
config: Optional[RunnableConfig] = None,
config_factories: Optional[
List[Callable[[RunnableConfig], RunnableConfig]]
] = None,
custom_input_type: Optional[Union[Type[Input], BaseModel]] = None,
custom_output_type: Optional[Union[Type[Output], BaseModel]] = None,
**other_kwargs: Any,
) -> None:
"""Create a RunnableBinding from a runnable and kwargs.
Args:
bound: The underlying runnable that this runnable delegates calls to.
kwargs: optional kwargs to pass to the underlying runnable, when running
the underlying runnable (e.g., via `invoke`, `batch`,
`transform`, or `stream` or async variants)
            config: optional config to bind to the underlying runnable.
            config_factories: optional list of config factories to apply to the
                config before binding it to the underlying runnable.
custom_input_type: Specify to override the input type of the underlying
runnable with a custom type.
custom_output_type: Specify to override the output type of the underlying
runnable with a custom type.
**other_kwargs: Unpacked into the base class.
"""
config = config or {}
# config_specs contains the list of valid `configurable` keys
if configurable := config.get("configurable", None):
allowed_keys = set(s.id for s in bound.config_specs)
for key in configurable:
if key not in allowed_keys:
raise ValueError(
f"Configurable key '{key}' not found in runnable with"
f" config keys: {allowed_keys}"
)
super().__init__(
bound=bound,
kwargs=kwargs or {},
config=config or {},
config_factories=config_factories or [],
custom_input_type=custom_input_type,
custom_output_type=custom_output_type,
**other_kwargs,
)
@property
def InputType(self) -> Type[Input]:
return (
cast(Type[Input], self.custom_input_type)
if self.custom_input_type is not None
else self.bound.InputType
)
@property
def OutputType(self) -> Type[Output]:
return (
cast(Type[Output], self.custom_output_type)
if self.custom_output_type is not None
else self.bound.OutputType
)
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
if self.custom_input_type is not None:
return super().get_input_schema(config)
return self.bound.get_input_schema(merge_configs(self.config, config))
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
if self.custom_output_type is not None:
return super().get_output_schema(config)
return self.bound.get_output_schema(merge_configs(self.config, config))
@property
def config_specs(self) -> List[ConfigurableFieldSpec]:
return self.bound.config_specs
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return cls.__module__.split(".")[:-1]
def _merge_configs(self, *configs: Optional[RunnableConfig]) -> RunnableConfig:
config = merge_configs(self.config, *configs)
return merge_configs(config, *(f(config) for f in self.config_factories))
def invoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
return self.bound.invoke(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
)
async def ainvoke(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Output:
return await self.bound.ainvoke(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
)
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
if isinstance(config, list):
configs = cast(
List[RunnableConfig],
[self._merge_configs(conf) for conf in config],
)
else:
configs = [self._merge_configs(config) for _ in range(len(inputs))]
return self.bound.batch(
inputs,
configs,
return_exceptions=return_exceptions,
**{**self.kwargs, **kwargs},
)
async def abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
if isinstance(config, list):
configs = cast(
List[RunnableConfig],
[self._merge_configs(conf) for conf in config],
)
else:
configs = [self._merge_configs(config) for _ in range(len(inputs))]
return await self.bound.abatch(
inputs,
configs,
return_exceptions=return_exceptions,
**{**self.kwargs, **kwargs},
)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
yield from self.bound.stream(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
)
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
async for item in self.bound.astream(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
):
yield item
def transform(
self,
input: Iterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Iterator[Output]:
yield from self.bound.transform(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
)
async def atransform(
self,
input: AsyncIterator[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> AsyncIterator[Output]:
async for item in self.bound.atransform(
input,
self._merge_configs(config),
**{**self.kwargs, **kwargs},
):
yield item
RunnableBindingBase.update_forward_refs(RunnableConfig=RunnableConfig)
class RunnableBinding(RunnableBindingBase[Input, Output]):
"""Wrap a runnable with additional functionality.
A RunnableBinding can be thought of as a "runnable decorator" that
preserves the essential features of Runnable; i.e., batching, streaming,
and async support, while adding additional functionality.
Any class that inherits from Runnable can be bound to a `RunnableBinding`.
Runnables expose a standard set of methods for creating `RunnableBindings`
or sub-classes of `RunnableBindings` (e.g., `RunnableRetry`,
`RunnableWithFallbacks`) that add additional functionality.
These methods include:
- `bind`: Bind kwargs to pass to the underlying runnable when running it.
- `with_config`: Bind config to pass to the underlying runnable when running it.
- `with_listeners`: Bind lifecycle listeners to the underlying runnable.
- `with_types`: Override the input and output types of the underlying runnable.
- `with_retry`: Bind a retry policy to the underlying runnable.
- `with_fallbacks`: Bind a fallback policy to the underlying runnable.
Example:
`bind`: Bind kwargs to pass to the underlying runnable when running it.
.. code-block:: python
# Create a runnable binding that invokes the ChatModel with the
# additional kwarg `stop=['-']` when running it.
from langchain.chat_models import ChatOpenAI
model = ChatOpenAI()
model.invoke('Say "Parrot-MAGIC"', stop=['-']) # Should return `Parrot`
# Using it the easy way via `bind` method which returns a new
# RunnableBinding
runnable_binding = model.bind(stop=['-'])
runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
Can also be done by instantiating a RunnableBinding directly (not recommended):
.. code-block:: python
from langchain.schema.runnable import RunnableBinding
runnable_binding = RunnableBinding(
bound=model,
kwargs={'stop': ['-']} # <-- Note the additional kwargs
)
runnable_binding.invoke('Say "Parrot-MAGIC"') # Should return `Parrot`
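    `with_config`: Bind config (e.g. tags) to pass to the underlying runnable when
    running it (an illustrative sketch reusing the `model` from above):
        .. code-block:: python
            tagged_model = model.with_config(tags=["parrot-chain"])
            # The bound config is merged into the run's config at invocation time.
            tagged_model.invoke('Say "Parrot-MAGIC"', stop=['-'])  # Should return `Parrot`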
"""
def bind(self, **kwargs: Any) -> Runnable[Input, Output]:
"""Bind additional kwargs to a Runnable, returning a new Runnable.
Args:
**kwargs: The kwargs to bind to the Runnable.
Returns:
A new Runnable with the same type and config as the original,
but with the additional kwargs bound.
"""
return self.__class__(
bound=self.bound,
config=self.config,
kwargs={**self.kwargs, **kwargs},
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
def with_config(
self,
config: Optional[RunnableConfig] = None,
# Sadly Unpack is not well supported by mypy so this will have to be untyped
**kwargs: Any,
) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=cast(RunnableConfig, {**self.config, **(config or {}), **kwargs}),
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
def with_listeners(
self,
*,
on_start: Optional[Listener] = None,
on_end: Optional[Listener] = None,
on_error: Optional[Listener] = None,
) -> Runnable[Input, Output]:
"""Bind lifecycle listeners to a Runnable, returning a new Runnable.
Args:
on_start: Called before the runnable starts running, with the Run object.
on_end: Called after the runnable finishes running, with the Run object.
on_error: Called if the runnable throws an error, with the Run object.
        Returns:
            A new Runnable with the listeners bound.
        The Run object contains information about the run, including its id,
        type, input, output, error, start_time, end_time, and any tags or metadata
        added to the run.
"""
from langchain_core.tracers.root_listeners import RootListenersTracer
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=self.config,
config_factories=[
lambda config: {
"callbacks": [
RootListenersTracer(
config=config,
on_start=on_start,
on_end=on_end,
on_error=on_error,
)
],
}
],
custom_input_type=self.custom_input_type,
custom_output_type=self.custom_output_type,
)
def with_types(
self,
input_type: Optional[Union[Type[Input], BaseModel]] = None,
output_type: Optional[Union[Type[Output], BaseModel]] = None,
) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound,
kwargs=self.kwargs,
config=self.config,
custom_input_type=input_type
if input_type is not None
else self.custom_input_type,
custom_output_type=output_type
if output_type is not None
else self.custom_output_type,
)
def with_retry(self, **kwargs: Any) -> Runnable[Input, Output]:
return self.__class__(
bound=self.bound.with_retry(**kwargs),
kwargs=self.kwargs,
config=self.config,
)
RunnableLike = Union[
Runnable[Input, Output],
Callable[[Input], Output],
Callable[[Input], Awaitable[Output]],
Callable[[Iterator[Input]], Iterator[Output]],
Callable[[AsyncIterator[Input]], AsyncIterator[Output]],
Mapping[str, Any],
]
def coerce_to_runnable(thing: RunnableLike) -> Runnable[Input, Output]:
"""Coerce a runnable-like object into a Runnable.
Args:
thing: A runnable-like object.
Returns:
A Runnable.
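    Example (illustrative only):
        .. code-block:: python
            # A callable becomes a RunnableLambda, a dict becomes a RunnableParallel,
            # and an existing Runnable is returned unchanged.
            coerce_to_runnable(lambda x: x + 1).invoke(1)  # returns 2
            coerce_to_runnable({"doubled": lambda x: x * 2}).invoke(2)  # {"doubled": 4}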
"""
if isinstance(thing, Runnable):
return thing
elif inspect.isasyncgenfunction(thing) or inspect.isgeneratorfunction(thing):
return RunnableGenerator(thing)
elif callable(thing):
return RunnableLambda(cast(Callable[[Input], Output], thing))
elif isinstance(thing, dict):
return cast(Runnable[Input, Output], RunnableParallel(thing))
else:
raise TypeError(
f"Expected a Runnable, callable or dict."
f"Instead got an unsupported type: {type(thing)}"
)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,407 | RunnableLambda: returned runnable called synchronously when using ainvoke | ### System Info
langchain on master branch
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [ ] Callbacks/Tracing
- [X] Async
### Reproduction
```
from langchain.schema.runnable import RunnableLambda
import asyncio
def idchain_sync(__input):
print(f'sync chain call: {__input}')
return __input
async def idchain_async(__input):
print(f'async chain call: {__input}')
return __input
idchain = RunnableLambda(func=idchain_sync,afunc=idchain_async)
def func(__input):
return idchain
asyncio.run(RunnableLambda(func).ainvoke('toto'))
# prints 'sync chain call: toto' instead of 'async chain call: toto'
```
### Expected behavior
LCEL routing can silently cause chains to run synchronously even though the user calls ainvoke.
When calling a RunnableLambda A that returns a chain B with ainvoke, we would expect the new chain B to be called with ainvoke.
However, if the function provided to RunnableLambda A is not async, then chain B is called with invoke, silently causing all the rest of the chain to run synchronously.
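A hedged sketch of the expected delegation (the names below are illustrative, not the actual fix): when the sync `func` returns a `Runnable` and the caller used `ainvoke`, the returned runnable should itself be awaited rather than invoked synchronously:
```
# illustrative only: inside RunnableLambda's async code path
output = self.func(input)  # sync func (possibly run in an executor)
if isinstance(output, Runnable):
    output = await output.ainvoke(input, config)  # keep the rest of the chain async
```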
| https://github.com/langchain-ai/langchain/issues/13407 | https://github.com/langchain-ai/langchain/pull/13408 | 391f200eaab9af587eea902b264f2d3af929b268 | e17edc4d0b1dccc98df2a7d1db86bfcc0eb66ca5 | "2023-11-15T17:27:49Z" | python | "2023-11-28T11:18:26Z" | libs/core/tests/unit_tests/runnables/test_runnable.py | import sys
from functools import partial
from operator import itemgetter
from typing import (
Any,
AsyncIterator,
Dict,
Iterator,
List,
Optional,
Sequence,
Union,
cast,
)
from uuid import UUID
import pytest
from freezegun import freeze_time
from pytest_mock import MockerFixture
from syrupy import SnapshotAssertion
from typing_extensions import TypedDict
from langchain_core.callbacks.manager import (
Callbacks,
atrace_as_chain_group,
trace_as_chain_group,
)
from langchain_core.documents import Document
from langchain_core.load import dumpd, dumps
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers import (
BaseOutputParser,
CommaSeparatedListOutputParser,
StrOutputParser,
)
from langchain_core.prompt_values import ChatPromptValue, StringPromptValue
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
PromptTemplate,
SystemMessagePromptTemplate,
)
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import (
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
RouterRunnable,
Runnable,
RunnableBinding,
RunnableBranch,
RunnableConfig,
RunnableGenerator,
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
RunnableSequence,
RunnableWithFallbacks,
add,
)
from langchain_core.tools import BaseTool, tool
from langchain_core.tracers import (
BaseTracer,
ConsoleCallbackHandler,
Run,
RunLog,
RunLogPatch,
)
from langchain_core.tracers.context import collect_runs
from tests.unit_tests.fake.chat_model import FakeListChatModel
from tests.unit_tests.fake.llm import FakeListLLM, FakeStreamingListLLM
class FakeTracer(BaseTracer):
"""Fake tracer that records LangChain execution.
It replaces run ids with deterministic UUIDs for snapshotting."""
def __init__(self) -> None:
"""Initialize the tracer."""
super().__init__()
self.runs: List[Run] = []
self.uuids_map: Dict[UUID, UUID] = {}
self.uuids_generator = (
UUID(f"00000000-0000-4000-8000-{i:012}", version=4) for i in range(10000)
)
def _replace_uuid(self, uuid: UUID) -> UUID:
if uuid not in self.uuids_map:
self.uuids_map[uuid] = next(self.uuids_generator)
return self.uuids_map[uuid]
def _copy_run(self, run: Run) -> Run:
return run.copy(
update={
"id": self._replace_uuid(run.id),
"parent_run_id": self.uuids_map[run.parent_run_id]
if run.parent_run_id
else None,
"child_runs": [self._copy_run(child) for child in run.child_runs],
"execution_order": None,
"child_execution_order": None,
}
)
def _persist_run(self, run: Run) -> None:
"""Persist a run."""
self.runs.append(self._copy_run(run))
class FakeRunnable(Runnable[str, int]):
def invoke(
self,
input: str,
config: Optional[RunnableConfig] = None,
) -> int:
return len(input)
class FakeRetriever(BaseRetriever):
def _get_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
async def _aget_relevant_documents(
self,
query: str,
*,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
return [Document(page_content="foo"), Document(page_content="bar")]
def test_schemas(snapshot: SnapshotAssertion) -> None:
fake = FakeRunnable() # str -> int
assert fake.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
assert fake.config_schema(include=["tags", "metadata", "run_name"]).schema() == {
"title": "FakeRunnableConfig",
"type": "object",
"properties": {
"metadata": {"title": "Metadata", "type": "object"},
"run_name": {"title": "Run Name", "type": "string"},
"tags": {"items": {"type": "string"}, "title": "Tags", "type": "array"},
},
}
fake_bound = FakeRunnable().bind(a="b") # str -> int
assert fake_bound.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_bound.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
fake_w_fallbacks = FakeRunnable().with_fallbacks((fake,)) # str -> int
assert fake_w_fallbacks.input_schema.schema() == {
"title": "FakeRunnableInput",
"type": "string",
}
assert fake_w_fallbacks.output_schema.schema() == {
"title": "FakeRunnableOutput",
"type": "integer",
}
def typed_lambda_impl(x: str) -> int:
return len(x)
typed_lambda = RunnableLambda(typed_lambda_impl) # str -> int
assert typed_lambda.input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "string",
}
assert typed_lambda.output_schema.schema() == {
"title": "RunnableLambdaOutput",
"type": "integer",
}
async def typed_async_lambda_impl(x: str) -> int:
return len(x)
typed_async_lambda: Runnable = RunnableLambda(typed_async_lambda_impl) # str -> int
assert typed_async_lambda.input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "string",
}
assert typed_async_lambda.output_schema.schema() == {
"title": "RunnableLambdaOutput",
"type": "integer",
}
fake_ret = FakeRetriever() # str -> List[Document]
assert fake_ret.input_schema.schema() == {
"title": "FakeRetrieverInput",
"type": "string",
}
assert fake_ret.output_schema.schema() == {
"title": "FakeRetrieverOutput",
"type": "array",
"items": {"$ref": "#/definitions/Document"},
"definitions": {
"Document": {
"title": "Document",
"description": "Class for storing a piece of text and associated metadata.", # noqa: E501
"type": "object",
"properties": {
"page_content": {"title": "Page Content", "type": "string"},
"metadata": {"title": "Metadata", "type": "object"},
"type": {
"title": "Type",
"enum": ["Document"],
"default": "Document",
"type": "string",
},
},
"required": ["page_content"],
}
},
}
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
assert fake_llm.input_schema.schema() == snapshot
assert fake_llm.output_schema.schema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
fake_chat = FakeListChatModel(responses=["a"]) # str -> List[List[str]]
assert fake_chat.input_schema.schema() == snapshot
assert fake_chat.output_schema.schema() == snapshot
chat_prompt = ChatPromptTemplate.from_messages(
[
MessagesPlaceholder(variable_name="history"),
("human", "Hello, how are you?"),
]
)
assert chat_prompt.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"history": {
"title": "History",
"type": "array",
"items": {
"anyOf": [
{"$ref": "#/definitions/AIMessage"},
{"$ref": "#/definitions/HumanMessage"},
{"$ref": "#/definitions/ChatMessage"},
{"$ref": "#/definitions/SystemMessage"},
{"$ref": "#/definitions/FunctionMessage"},
{"$ref": "#/definitions/ToolMessage"},
]
},
}
},
"definitions": {
"AIMessage": {
"title": "AIMessage",
"description": "A Message from an AI.",
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "ai",
"enum": ["ai"],
"type": "string",
},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"HumanMessage": {
"title": "HumanMessage",
"description": "A Message from a human.",
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "human",
"enum": ["human"],
"type": "string",
},
"example": {
"title": "Example",
"default": False,
"type": "boolean",
},
},
"required": ["content"],
},
"ChatMessage": {
"title": "ChatMessage",
"description": "A Message that can be assigned an arbitrary speaker (i.e. role).", # noqa
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "chat",
"enum": ["chat"],
"type": "string",
},
"role": {"title": "Role", "type": "string"},
},
"required": ["content", "role"],
},
"SystemMessage": {
"title": "SystemMessage",
"description": "A Message for priming AI behavior, usually passed in as the first of a sequence\nof input messages.", # noqa
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "system",
"enum": ["system"],
"type": "string",
},
},
"required": ["content"],
},
"FunctionMessage": {
"title": "FunctionMessage",
"description": "A Message for passing the result of executing a function back to a model.", # noqa
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "function",
"enum": ["function"],
"type": "string",
},
"name": {"title": "Name", "type": "string"},
},
"required": ["content", "name"],
},
"ToolMessage": {
"title": "ToolMessage",
"description": "A Message for passing the result of executing a tool back to a model.", # noqa
"type": "object",
"properties": {
"content": {
"title": "Content",
"anyOf": [
{"type": "string"},
{
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "object"}]
},
},
],
},
"additional_kwargs": {
"title": "Additional Kwargs",
"type": "object",
},
"type": {
"title": "Type",
"default": "tool",
"enum": ["tool"],
"type": "string",
},
"tool_call_id": {"title": "Tool Call Id", "type": "string"},
},
"required": ["content", "tool_call_id"],
},
},
}
assert chat_prompt.output_schema.schema() == snapshot
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert prompt.output_schema.schema() == snapshot
prompt_mapper = PromptTemplate.from_template("Hello, {name}!").map()
assert prompt_mapper.input_schema.schema() == {
"definitions": {
"PromptInput": {
"properties": {"name": {"title": "Name", "type": "string"}},
"title": "PromptInput",
"type": "object",
}
},
"items": {"$ref": "#/definitions/PromptInput"},
"type": "array",
"title": "RunnableEachInput",
}
assert prompt_mapper.output_schema.schema() == snapshot
list_parser = CommaSeparatedListOutputParser()
assert list_parser.input_schema.schema() == snapshot
assert list_parser.output_schema.schema() == {
"title": "CommaSeparatedListOutputParserOutput",
"type": "array",
"items": {"type": "string"},
}
seq = prompt | fake_llm | list_parser
assert seq.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert seq.output_schema.schema() == {
"type": "array",
"items": {"type": "string"},
"title": "CommaSeparatedListOutputParserOutput",
}
router: Runnable = RouterRunnable({})
assert router.input_schema.schema() == {
"title": "RouterRunnableInput",
"$ref": "#/definitions/RouterInput",
"definitions": {
"RouterInput": {
"title": "RouterInput",
"type": "object",
"properties": {
"key": {"title": "Key", "type": "string"},
"input": {"title": "Input"},
},
"required": ["key", "input"],
}
},
}
assert router.output_schema.schema() == {"title": "RouterRunnableOutput"}
seq_w_map: Runnable = (
prompt
| fake_llm
| {
"original": RunnablePassthrough(input_type=str),
"as_list": list_parser,
"length": typed_lambda_impl,
}
)
assert seq_w_map.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"name": {"title": "Name", "type": "string"}},
}
assert seq_w_map.output_schema.schema() == {
"title": "RunnableParallelOutput",
"type": "object",
"properties": {
"original": {"title": "Original", "type": "string"},
"length": {"title": "Length", "type": "integer"},
"as_list": {
"title": "As List",
"type": "array",
"items": {"type": "string"},
},
},
}
def test_passthrough_assign_schema() -> None:
retriever = FakeRetriever() # str -> List[Document]
prompt = PromptTemplate.from_template("{context} {question}")
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
seq_w_assign: Runnable = (
RunnablePassthrough.assign(context=itemgetter("question") | retriever)
| prompt
| fake_llm
)
assert seq_w_assign.input_schema.schema() == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "RunnableSequenceInput",
"type": "object",
}
assert seq_w_assign.output_schema.schema() == {
"title": "FakeListLLMOutput",
"type": "string",
}
invalid_seq_w_assign: Runnable = (
RunnablePassthrough.assign(context=itemgetter("question") | retriever)
| fake_llm
)
# fallback to RunnableAssign.input_schema if next runnable doesn't have
# expected dict input_schema
assert invalid_seq_w_assign.input_schema.schema() == {
"properties": {"question": {"title": "Question"}},
"title": "RunnableParallelInput",
"type": "object",
}
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
def test_lambda_schemas() -> None:
first_lambda = lambda x: x["hello"] # noqa: E731
assert RunnableLambda(first_lambda).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}},
}
second_lambda = lambda x, y: (x["hello"], x["bye"], y["bah"]) # noqa: E731
assert RunnableLambda(
second_lambda, # type: ignore[arg-type]
).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"hello": {"title": "Hello"}, "bye": {"title": "Bye"}},
}
def get_value(input): # type: ignore[no-untyped-def]
return input["variable_name"]
assert RunnableLambda(get_value).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {"variable_name": {"title": "Variable Name"}},
}
async def aget_value(input): # type: ignore[no-untyped-def]
return (input["variable_name"], input.get("another"))
assert RunnableLambda(aget_value).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {
"another": {"title": "Another"},
"variable_name": {"title": "Variable Name"},
},
}
async def aget_values(input): # type: ignore[no-untyped-def]
return {
"hello": input["variable_name"],
"bye": input["variable_name"],
"byebye": input["yo"],
}
assert RunnableLambda(aget_values).input_schema.schema() == {
"title": "RunnableLambdaInput",
"type": "object",
"properties": {
"variable_name": {"title": "Variable Name"},
"yo": {"title": "Yo"},
},
}
class InputType(TypedDict):
variable_name: str
yo: int
class OutputType(TypedDict):
hello: str
bye: str
byebye: int
async def aget_values_typed(input: InputType) -> OutputType:
return {
"hello": input["variable_name"],
"bye": input["variable_name"],
"byebye": input["yo"],
}
assert (
RunnableLambda(aget_values_typed).input_schema.schema() # type: ignore[arg-type]
== {
"title": "RunnableLambdaInput",
"$ref": "#/definitions/InputType",
"definitions": {
"InputType": {
"properties": {
"variable_name": {
"title": "Variable " "Name",
"type": "string",
},
"yo": {"title": "Yo", "type": "integer"},
},
"required": ["variable_name", "yo"],
"title": "InputType",
"type": "object",
}
},
}
)
assert RunnableLambda(aget_values_typed).output_schema.schema() == { # type: ignore[arg-type]
"title": "RunnableLambdaOutput",
"$ref": "#/definitions/OutputType",
"definitions": {
"OutputType": {
"properties": {
"bye": {"title": "Bye", "type": "string"},
"byebye": {"title": "Byebye", "type": "integer"},
"hello": {"title": "Hello", "type": "string"},
},
"required": ["hello", "bye", "byebye"],
"title": "OutputType",
"type": "object",
}
},
}
def test_with_types_with_type_generics() -> None:
"""Verify that with_types works if we use things like List[int]"""
def foo(x: int) -> None:
"""Add one to the input."""
raise NotImplementedError()
# Try specifying some
RunnableLambda(foo).with_types(
output_type=List[int], # type: ignore[arg-type]
input_type=List[int], # type: ignore[arg-type]
)
RunnableLambda(foo).with_types(
output_type=Sequence[int], # type: ignore[arg-type]
input_type=Sequence[int], # type: ignore[arg-type]
)
def test_schema_complex_seq() -> None:
prompt1 = ChatPromptTemplate.from_template("what is the city {person} is from?")
prompt2 = ChatPromptTemplate.from_template(
"what country is the city {city} in? respond in {language}"
)
model = FakeListChatModel(responses=[""])
chain1 = prompt1 | model | StrOutputParser()
chain2: Runnable = (
{"city": chain1, "language": itemgetter("language")}
| prompt2
| model
| StrOutputParser()
)
assert chain2.input_schema.schema() == {
"title": "RunnableParallelInput",
"type": "object",
"properties": {
"person": {"title": "Person", "type": "string"},
"language": {"title": "Language"},
},
}
assert chain2.output_schema.schema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
assert chain2.with_types(input_type=str).input_schema.schema() == {
"title": "RunnableBindingInput",
"type": "string",
}
assert chain2.with_types(input_type=int).output_schema.schema() == {
"title": "StrOutputParserOutput",
"type": "string",
}
class InputType(BaseModel):
person: str
assert chain2.with_types(input_type=InputType).input_schema.schema() == {
"title": "InputType",
"type": "object",
"properties": {"person": {"title": "Person", "type": "string"}},
"required": ["person"],
}
def test_configurable_fields() -> None:
fake_llm = FakeListLLM(responses=["a"]) # str -> List[List[str]]
assert fake_llm.invoke("...") == "a"
fake_llm_configurable = fake_llm.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
assert fake_llm_configurable.invoke("...") == "a"
assert fake_llm_configurable.config_schema().schema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
}
},
}
},
}
fake_llm_configured = fake_llm_configurable.with_config(
configurable={"llm_responses": ["b"]}
)
assert fake_llm_configured.invoke("...") == "b"
prompt = PromptTemplate.from_template("Hello, {name}!")
assert prompt.invoke({"name": "John"}) == StringPromptValue(text="Hello, John!")
prompt_configurable = prompt.configurable_fields(
template=ConfigurableField(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
)
)
assert prompt_configurable.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John!"
)
assert prompt_configurable.config_schema().schema() == {
"title": "RunnableConfigurableFieldsConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
}
},
}
},
}
prompt_configured = prompt_configurable.with_config(
configurable={"prompt_template": "Hello, {name}! {name}!"}
)
assert prompt_configured.invoke({"name": "John"}) == StringPromptValue(
text="Hello, John! John!"
)
assert prompt_configurable.with_config(
configurable={"prompt_template": "Hello {name} in {lang}"}
).input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
}
chain_configurable = prompt_configurable | fake_llm_configurable | StrOutputParser()
assert chain_configurable.invoke({"name": "John"}) == "a"
assert chain_configurable.config_schema().schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
},
},
}
},
}
assert (
chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).invoke({"name": "John", "lang": "en"})
== "c"
)
assert chain_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name} {lang}!",
"llm_responses": ["c"],
}
).input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {
"lang": {"title": "Lang", "type": "string"},
"name": {"title": "Name", "type": "string"},
},
}
chain_with_map_configurable: Runnable = prompt_configurable | {
"llm1": fake_llm_configurable | StrOutputParser(),
"llm2": fake_llm_configurable | StrOutputParser(),
"llm3": fake_llm.configurable_fields(
responses=ConfigurableField("other_responses")
)
| StrOutputParser(),
}
assert chain_with_map_configurable.invoke({"name": "John"}) == {
"llm1": "a",
"llm2": "a",
"llm3": "a",
}
assert chain_with_map_configurable.config_schema().schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"other_responses": {
"title": "Other Responses",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "Hello, {name}!",
"type": "string",
},
},
}
},
}
assert chain_with_map_configurable.with_config(
configurable={
"prompt_template": "A very good morning to you, {name}!",
"llm_responses": ["c"],
"other_responses": ["d"],
}
).invoke({"name": "John"}) == {"llm1": "c", "llm2": "c", "llm3": "d"}
def test_configurable_alts_factory() -> None:
fake_llm = FakeListLLM(responses=["a"]).configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=partial(FakeListLLM, responses=["b"]),
)
assert fake_llm.invoke("...") == "a"
assert fake_llm.with_config(configurable={"llm": "chat"}).invoke("...") == "b"
def test_configurable_fields_prefix_keys() -> None:
fake_chat = FakeListChatModel(responses=["b"]).configurable_fields(
responses=ConfigurableFieldMultiOption(
id="responses",
name="Chat Responses",
options={
"hello": "A good morning to you!",
"bye": "See you later!",
"helpful": "How can I help you?",
},
default=["hello", "bye"],
),
# (sleep is a configurable field in FakeListChatModel)
sleep=ConfigurableField(
id="chat_sleep",
is_shared=True,
),
)
fake_llm = (
FakeListLLM(responses=["a"])
.configurable_fields(
responses=ConfigurableField(
id="responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
.configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=fake_chat | StrOutputParser(),
prefix_keys=True,
)
)
prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields(
template=ConfigurableFieldSingleOption(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
options={
"hello": "Hello, {name}!",
"good_morning": "A very good morning to you, {name}!",
},
default="hello",
)
)
chain = prompt | fake_llm
assert chain.config_schema().schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"LLM": {
"title": "LLM",
"description": "An enumeration.",
"enum": ["chat", "default"],
"type": "string",
},
"Chat_Responses": {
"title": "Chat Responses",
"description": "An enumeration.",
"enum": ["hello", "bye", "helpful"],
"type": "string",
},
"Prompt_Template": {
"title": "Prompt Template",
"description": "An enumeration.",
"enum": ["hello", "good_morning"],
"type": "string",
},
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "hello",
"allOf": [{"$ref": "#/definitions/Prompt_Template"}],
},
"llm": {
"title": "LLM",
"default": "default",
"allOf": [{"$ref": "#/definitions/LLM"}],
},
# not prefixed because marked as shared
"chat_sleep": {
"title": "Chat Sleep",
"type": "number",
},
# prefixed for "chat" option
"llm==chat/responses": {
"title": "Chat Responses",
"default": ["hello", "bye"],
"type": "array",
"items": {"$ref": "#/definitions/Chat_Responses"},
},
# prefixed for "default" option
"llm==default/responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
},
},
},
}
def test_configurable_fields_example() -> None:
fake_chat = FakeListChatModel(responses=["b"]).configurable_fields(
responses=ConfigurableFieldMultiOption(
id="chat_responses",
name="Chat Responses",
options={
"hello": "A good morning to you!",
"bye": "See you later!",
"helpful": "How can I help you?",
},
default=["hello", "bye"],
)
)
fake_llm = (
FakeListLLM(responses=["a"])
.configurable_fields(
responses=ConfigurableField(
id="llm_responses",
name="LLM Responses",
description="A list of fake responses for this LLM",
)
)
.configurable_alternatives(
ConfigurableField(id="llm", name="LLM"),
chat=fake_chat | StrOutputParser(),
)
)
prompt = PromptTemplate.from_template("Hello, {name}!").configurable_fields(
template=ConfigurableFieldSingleOption(
id="prompt_template",
name="Prompt Template",
description="The prompt template for this chain",
options={
"hello": "Hello, {name}!",
"good_morning": "A very good morning to you, {name}!",
},
default="hello",
)
)
# deduplication of configurable fields
chain_configurable = prompt | fake_llm | (lambda x: {"name": x}) | prompt | fake_llm
assert chain_configurable.invoke({"name": "John"}) == "a"
assert chain_configurable.config_schema().schema() == {
"title": "RunnableSequenceConfig",
"type": "object",
"properties": {"configurable": {"$ref": "#/definitions/Configurable"}},
"definitions": {
"LLM": {
"title": "LLM",
"description": "An enumeration.",
"enum": ["chat", "default"],
"type": "string",
},
"Chat_Responses": {
"description": "An enumeration.",
"enum": ["hello", "bye", "helpful"],
"title": "Chat Responses",
"type": "string",
},
"Prompt_Template": {
"description": "An enumeration.",
"enum": ["hello", "good_morning"],
"title": "Prompt Template",
"type": "string",
},
"Configurable": {
"title": "Configurable",
"type": "object",
"properties": {
"chat_responses": {
"default": ["hello", "bye"],
"items": {"$ref": "#/definitions/Chat_Responses"},
"title": "Chat Responses",
"type": "array",
},
"llm": {
"title": "LLM",
"default": "default",
"allOf": [{"$ref": "#/definitions/LLM"}],
},
"llm_responses": {
"title": "LLM Responses",
"description": "A list of fake responses for this LLM",
"default": ["a"],
"type": "array",
"items": {"type": "string"},
},
"prompt_template": {
"title": "Prompt Template",
"description": "The prompt template for this chain",
"default": "hello",
"allOf": [{"$ref": "#/definitions/Prompt_Template"}],
},
},
},
},
}
with pytest.raises(ValueError):
chain_configurable.with_config(configurable={"llm123": "chat"})
assert (
chain_configurable.with_config(configurable={"llm": "chat"}).invoke(
{"name": "John"}
)
== "A good morning to you!"
)
assert (
chain_configurable.with_config(
configurable={"llm": "chat", "chat_responses": ["helpful"]}
).invoke({"name": "John"})
== "How can I help you?"
)
async def test_passthrough_tap_async(mocker: MockerFixture) -> None:
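    """Test that RunnablePassthrough(fn) taps each output without altering it."""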
fake = FakeRunnable()
mock = mocker.Mock()
seq: Runnable = fake | RunnablePassthrough(mock)
assert await seq.ainvoke("hello") == 5
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert [
part async for part in seq.astream("hello", dict(metadata={"key": "value"}))
] == [5]
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert seq.invoke("hello") == 5
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
assert [part for part in seq.stream("hello", dict(metadata={"key": "value"}))] == [
5
]
assert mock.call_args_list == [mocker.call(5)]
mock.reset_mock()
async def test_with_config(mocker: MockerFixture) -> None:
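    """Test that with_config() merges bound config with per-call config."""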
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
assert fake.with_config(tags=["a-tag"]).invoke("hello") == 5
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"])),
]
spy.reset_mock()
fake_1: Runnable = RunnablePassthrough()
fake_2: Runnable = RunnablePassthrough()
spy_seq_step = mocker.spy(fake_1.__class__, "invoke")
sequence = fake_1.with_config(tags=["a-tag"]) | fake_2.with_config(
tags=["b-tag"], max_concurrency=5
)
assert sequence.invoke("hello") == "hello"
assert len(spy_seq_step.call_args_list) == 2
for i, call in enumerate(spy_seq_step.call_args_list):
assert call.args[1] == "hello"
if i == 0:
assert call.args[2].get("tags") == ["a-tag"]
assert call.args[2].get("max_concurrency") is None
else:
assert call.args[2].get("tags") == ["b-tag"]
assert call.args[2].get("max_concurrency") == 5
mocker.stop(spy_seq_step)
assert [
*fake.with_config(tags=["a-tag"]).stream(
"hello", dict(metadata={"key": "value"})
)
] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"], metadata={"key": "value"})),
]
spy.reset_mock()
assert fake.with_config(recursion_limit=5).batch(
["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})]
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(
sorted(spy.call_args_list, key=lambda x: 0 if x.args[0] == "hello" else 1)
):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("recursion_limit") == 5
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert fake.with_config(metadata={"a": "b"}).batch(
["hello", "wooorld"], dict(tags=["a-tag"])
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {"a": "b"}
spy.reset_mock()
handler = ConsoleCallbackHandler()
assert (
await fake.with_config(metadata={"a": "b"}).ainvoke(
"hello", config={"callbacks": [handler]}
)
== 5
)
assert spy.call_args_list == [
mocker.call("hello", dict(callbacks=[handler], metadata={"a": "b"})),
]
spy.reset_mock()
assert [
part async for part in fake.with_config(metadata={"a": "b"}).astream("hello")
] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(metadata={"a": "b"})),
]
spy.reset_mock()
assert await fake.with_config(recursion_limit=5, tags=["c"]).abatch(
["hello", "wooorld"], dict(metadata={"key": "value"})
) == [
5,
7,
]
assert spy.call_args_list == [
mocker.call(
"hello",
dict(
metadata={"key": "value"},
tags=["c"],
callbacks=None,
recursion_limit=5,
),
),
mocker.call(
"wooorld",
dict(
metadata={"key": "value"},
tags=["c"],
callbacks=None,
recursion_limit=5,
),
),
]
async def test_default_method_implementations(mocker: MockerFixture) -> None:
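    """Test that default batch/stream/async methods delegate to invoke."""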
fake = FakeRunnable()
spy = mocker.spy(fake, "invoke")
assert fake.invoke("hello", dict(tags=["a-tag"])) == 5
assert spy.call_args_list == [
mocker.call("hello", dict(tags=["a-tag"])),
]
spy.reset_mock()
assert [*fake.stream("hello", dict(metadata={"key": "value"}))] == [5]
assert spy.call_args_list == [
mocker.call("hello", dict(metadata={"key": "value"})),
]
spy.reset_mock()
assert fake.batch(
["hello", "wooorld"], [dict(tags=["a-tag"]), dict(metadata={"key": "value"})]
) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
if i == 0:
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
else:
assert call.args[1].get("tags") == []
assert call.args[1].get("metadata") == {"key": "value"}
spy.reset_mock()
assert fake.batch(["hello", "wooorld"], dict(tags=["a-tag"])) == [5, 7]
assert len(spy.call_args_list) == 2
for i, call in enumerate(spy.call_args_list):
assert call.args[0] == ("hello" if i == 0 else "wooorld")
assert call.args[1].get("tags") == ["a-tag"]
assert call.args[1].get("metadata") == {}
spy.reset_mock()
assert await fake.ainvoke("hello", config={"callbacks": []}) == 5
assert spy.call_args_list == [
mocker.call("hello", dict(callbacks=[])),
]
spy.reset_mock()
assert [part async for part in fake.astream("hello")] == [5]
assert spy.call_args_list == [
mocker.call("hello", None),
]
spy.reset_mock()
assert await fake.abatch(["hello", "wooorld"], dict(metadata={"key": "value"})) == [
5,
7,
]
assert spy.call_args_list == [
mocker.call(
"hello",
dict(
metadata={"key": "value"},
tags=[],
callbacks=None,
recursion_limit=25,
),
),
mocker.call(
"wooorld",
dict(
metadata={"key": "value"},
tags=[],
callbacks=None,
recursion_limit=25,
),
),
]
async def test_prompt() -> None:
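    """Test ChatPromptTemplate as a runnable across sync, async and log APIs."""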
prompt = ChatPromptTemplate.from_messages(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessagePromptTemplate.from_template("{question}"),
]
)
expected = ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert prompt.invoke({"question": "What is your name?"}) == expected
assert prompt.batch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
) == [
expected,
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert [*prompt.stream({"question": "What is your name?"})] == [expected]
assert await prompt.ainvoke({"question": "What is your name?"}) == expected
assert await prompt.abatch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
) == [
expected,
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert [
part async for part in prompt.astream({"question": "What is your name?"})
] == [expected]
stream_log = [
part async for part in prompt.astream_log({"question": "What is your name?"})
]
assert len(stream_log[0].ops) == 1
assert stream_log[0].ops[0]["op"] == "replace"
assert stream_log[0].ops[0]["path"] == ""
assert stream_log[0].ops[0]["value"]["logs"] == {}
assert stream_log[0].ops[0]["value"]["final_output"] is None
assert stream_log[0].ops[0]["value"]["streamed_output"] == []
assert isinstance(stream_log[0].ops[0]["value"]["id"], str)
assert stream_log[1:] == [
RunLogPatch(
{
"op": "replace",
"path": "/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
}
),
RunLogPatch({"op": "add", "path": "/streamed_output/-", "value": expected}),
]
stream_log_state = [
part
async for part in prompt.astream_log(
{"question": "What is your name?"}, diff=False
)
]
# remove random id
stream_log[0].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
stream_log_state[-1].ops[0]["value"]["id"] = "00000000-0000-0000-0000-000000000000"
stream_log_state[-1].state["id"] = "00000000-0000-0000-0000-000000000000"
# assert output with diff=False matches output with diff=True
assert stream_log_state[-1].ops == [op for chunk in stream_log for op in chunk.ops]
assert stream_log_state[-1] == RunLog(
*[op for chunk in stream_log for op in chunk.ops],
state={
"final_output": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
"id": "00000000-0000-0000-0000-000000000000",
"logs": {},
"streamed_output": [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
],
},
)
# nested inside trace_with_chain_group
async with atrace_as_chain_group("a_group") as manager:
stream_log_nested = [
part
async for part in prompt.astream_log(
{"question": "What is your name?"}, config={"callbacks": manager}
)
]
assert len(stream_log_nested[0].ops) == 1
assert stream_log_nested[0].ops[0]["op"] == "replace"
assert stream_log_nested[0].ops[0]["path"] == ""
assert stream_log_nested[0].ops[0]["value"]["logs"] == {}
assert stream_log_nested[0].ops[0]["value"]["final_output"] is None
assert stream_log_nested[0].ops[0]["value"]["streamed_output"] == []
assert isinstance(stream_log_nested[0].ops[0]["value"]["id"], str)
assert stream_log_nested[1:] == [
RunLogPatch(
{
"op": "replace",
"path": "/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
}
),
RunLogPatch({"op": "add", "path": "/streamed_output/-", "value": expected}),
]
def test_prompt_template_params() -> None:
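    """Test that extra prompt inputs are ignored and missing ones raise KeyError."""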
prompt = ChatPromptTemplate.from_template(
"Respond to the following question: {question}"
)
result = prompt.invoke(
{
"question": "test",
"topic": "test",
}
)
assert result == ChatPromptValue(
messages=[HumanMessage(content="Respond to the following question: test")]
)
with pytest.raises(KeyError):
prompt.invoke({})
def test_with_listeners(mocker: MockerFixture) -> None:
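    """Test that on_start/on_end listeners fire once per chain invocation."""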
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo"])
chain: Runnable = prompt | chat
mock_start = mocker.Mock()
mock_end = mocker.Mock()
chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke(
{"question": "Who are you?"}
)
assert mock_start.call_count == 1
assert mock_start.call_args[0][0].name == "RunnableSequence"
assert mock_end.call_count == 1
mock_start.reset_mock()
mock_end.reset_mock()
with trace_as_chain_group("hello") as manager:
chain.with_listeners(on_start=mock_start, on_end=mock_end).invoke(
{"question": "Who are you?"}, {"callbacks": manager}
)
assert mock_start.call_count == 1
assert mock_start.call_args[0][0].name == "RunnableSequence"
assert mock_end.call_count == 1
async def test_with_listeners_async(mocker: MockerFixture) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo"])
chain: Runnable = prompt | chat
mock_start = mocker.Mock()
mock_end = mocker.Mock()
await chain.with_listeners(on_start=mock_start, on_end=mock_end).ainvoke(
{"question": "Who are you?"}
)
assert mock_start.call_count == 1
assert mock_start.call_args[0][0].name == "RunnableSequence"
assert mock_end.call_count == 1
mock_start.reset_mock()
mock_end.reset_mock()
async with atrace_as_chain_group("hello") as manager:
await chain.with_listeners(on_start=mock_start, on_end=mock_end).ainvoke(
{"question": "Who are you?"}, {"callbacks": manager}
)
assert mock_start.call_count == 1
assert mock_start.call_args[0][0].name == "RunnableSequence"
assert mock_end.call_count == 1
@freeze_time("2023-01-01")
def test_prompt_with_chat_model(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
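    """Test a prompt | chat model sequence: invoke, batch and stream with tracing."""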
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo"])
chain: Runnable = prompt | chat
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == chat
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == AIMessage(content="foo")
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test batch
prompt_spy = mocker.spy(prompt.__class__, "batch")
chat_spy = mocker.spy(chat.__class__, "batch")
tracer = FakeTracer()
assert chain.batch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
],
dict(callbacks=[tracer]),
) == [
AIMessage(content="foo"),
AIMessage(content="foo"),
]
assert prompt_spy.call_args.args[1] == [
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
assert chat_spy.call_args.args[1] == [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert (
len(
[
r
for r in tracer.runs
if r.parent_run_id is None and len(r.child_runs) == 2
]
)
== 2
), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test stream
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "stream")
tracer = FakeTracer()
assert [
*chain.stream({"question": "What is your name?"}, dict(callbacks=[tracer]))
] == [
AIMessageChunk(content="f"),
AIMessageChunk(content="o"),
AIMessageChunk(content="o"),
]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
@freeze_time("2023-01-01")
async def test_prompt_with_chat_model_async(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo"])
chain: Runnable = prompt | chat
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == chat
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
chat_spy = mocker.spy(chat.__class__, "ainvoke")
tracer = FakeTracer()
assert await chain.ainvoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == AIMessage(content="foo")
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test batch
prompt_spy = mocker.spy(prompt.__class__, "abatch")
chat_spy = mocker.spy(chat.__class__, "abatch")
tracer = FakeTracer()
assert await chain.abatch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
],
dict(callbacks=[tracer]),
) == [
AIMessage(content="foo"),
AIMessage(content="foo"),
]
assert prompt_spy.call_args.args[1] == [
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
assert chat_spy.call_args.args[1] == [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert (
len(
[
r
for r in tracer.runs
if r.parent_run_id is None and len(r.child_runs) == 2
]
)
== 2
), "Each of 2 outer runs contains exactly two inner runs (1 prompt, 1 chat)"
mocker.stop(prompt_spy)
mocker.stop(chat_spy)
# Test stream
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
chat_spy = mocker.spy(chat.__class__, "astream")
tracer = FakeTracer()
assert [
a
async for a in chain.astream(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
] == [
AIMessageChunk(content="f"),
AIMessageChunk(content="o"),
AIMessageChunk(content="o"),
]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
@freeze_time("2023-01-01")
async def test_prompt_with_llm(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
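    """Test async invoke/batch/stream and astream_log for a prompt | llm chain."""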
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
chain: Runnable = prompt | llm
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == []
assert chain.last == llm
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await chain.ainvoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
== "foo"
)
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
# Test batch
prompt_spy = mocker.spy(prompt.__class__, "abatch")
llm_spy = mocker.spy(llm.__class__, "abatch")
tracer = FakeTracer()
assert await chain.abatch(
[
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
],
dict(callbacks=[tracer]),
) == ["bar", "foo"]
assert prompt_spy.call_args.args[1] == [
{"question": "What is your name?"},
{"question": "What is your favorite color?"},
]
assert llm_spy.call_args.args[1] == [
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your favorite color?"),
]
),
]
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
# Test stream
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "astream")
tracer = FakeTracer()
assert [
token
async for token in chain.astream(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
] == ["bar"]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
prompt_spy.reset_mock()
llm_spy.reset_mock()
stream_log = [
part async for part in chain.astream_log({"question": "What is your name?"})
]
# remove ids from logs
for part in stream_log:
for op in part.ops:
if (
isinstance(op["value"], dict)
and "id" in op["value"]
and not isinstance(op["value"]["id"], list) # serialized lc id
):
del op["value"]["id"]
assert stream_log == [
RunLogPatch(
{
"op": "replace",
"path": "",
"value": {
"logs": {},
"final_output": None,
"streamed_output": [],
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "ChatPromptTemplate",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:1"],
"type": "prompt",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/ChatPromptTemplate/final_output",
"value": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
},
{
"op": "add",
"path": "/logs/ChatPromptTemplate/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch(
{
"op": "add",
"path": "/logs/FakeListLLM",
"value": {
"end_time": None,
"final_output": None,
"metadata": {},
"name": "FakeListLLM",
"start_time": "2023-01-01T00:00:00.000",
"streamed_output_str": [],
"tags": ["seq:step:2"],
"type": "llm",
},
}
),
RunLogPatch(
{
"op": "add",
"path": "/logs/FakeListLLM/final_output",
"value": {
"generations": [
[{"generation_info": None, "text": "foo", "type": "Generation"}]
],
"llm_output": None,
"run": None,
},
},
{
"op": "add",
"path": "/logs/FakeListLLM/end_time",
"value": "2023-01-01T00:00:00.000",
},
),
RunLogPatch({"op": "add", "path": "/streamed_output/-", "value": "foo"}),
RunLogPatch(
{"op": "replace", "path": "/final_output", "value": {"output": "foo"}}
),
]
@freeze_time("2023-01-01")
async def test_stream_log_retriever() -> None:
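    """Test that astream_log records a log entry for every step of the chain."""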
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{documents}"
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
chain: Runnable = (
{"documents": FakeRetriever(), "question": itemgetter("question")}
| prompt
| {"one": llm, "two": llm}
)
stream_log = [
part async for part in chain.astream_log({"question": "What is your name?"})
]
# remove ids from logs
for part in stream_log:
for op in part.ops:
if (
isinstance(op["value"], dict)
and "id" in op["value"]
and not isinstance(op["value"]["id"], list) # serialized lc id
):
del op["value"]["id"]
assert sorted(cast(RunLog, add(stream_log)).state["logs"]) == [
"ChatPromptTemplate",
"FakeListLLM",
"FakeListLLM:2",
"Retriever",
"RunnableLambda",
"RunnableParallel",
"RunnableParallel:2",
]
@freeze_time("2023-01-01")
async def test_prompt_with_llm_and_async_lambda(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeListLLM(responses=["foo", "bar"])
async def passthrough(input: Any) -> Any:
return input
chain = prompt | llm | passthrough
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [llm]
assert chain.last == RunnableLambda(func=passthrough)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "ainvoke")
llm_spy = mocker.spy(llm.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await chain.ainvoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
)
== "foo"
)
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert tracer.runs == snapshot
mocker.stop(prompt_spy)
mocker.stop(llm_spy)
@freeze_time("2023-01-01")
def test_prompt_with_chat_model_and_parser(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain = prompt | chat | parser
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [chat]
assert chain.last == parser
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
parser_spy = mocker.spy(parser.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == ["foo", "bar"]
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
assert tracer.runs == snapshot
@freeze_time("2023-01-01")
def test_combining_sequences(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
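    """Test that piping two sequences together flattens them into one sequence."""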
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain = prompt | chat | parser
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [chat]
assert chain.last == parser
if sys.version_info >= (3, 9):
assert dumps(chain, pretty=True) == snapshot
prompt2 = (
SystemMessagePromptTemplate.from_template("You are a nicer assistant.")
+ "{question}"
)
chat2 = FakeListChatModel(responses=["baz, qux"])
parser2 = CommaSeparatedListOutputParser()
input_formatter: RunnableLambda[List[str], Dict[str, Any]] = RunnableLambda(
lambda x: {"question": x[0] + x[1]}
)
chain2 = cast(RunnableSequence, input_formatter | prompt2 | chat2 | parser2)
assert isinstance(chain, RunnableSequence)
assert chain2.first == input_formatter
assert chain2.middle == [prompt2, chat2]
assert chain2.last == parser2
if sys.version_info >= (3, 9):
assert dumps(chain2, pretty=True) == snapshot
combined_chain = cast(RunnableSequence, chain | chain2)
assert combined_chain.first == prompt
assert combined_chain.middle == [
chat,
parser,
input_formatter,
prompt2,
chat2,
]
assert combined_chain.last == parser2
if sys.version_info >= (3, 9):
assert dumps(combined_chain, pretty=True) == snapshot
# Test invoke
tracer = FakeTracer()
assert combined_chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == ["baz", "qux"]
if sys.version_info >= (3, 9):
assert tracer.runs == snapshot
@freeze_time("2023-01-01")
def test_seq_dict_prompt_llm(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
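    """Test a dict of runnables as the first step, coerced to a RunnableParallel."""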
passthrough = mocker.Mock(side_effect=lambda x: x)
retriever = FakeRetriever()
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ """Context:
{documents}
Question:
{question}"""
)
chat = FakeListChatModel(responses=["foo, bar"])
parser = CommaSeparatedListOutputParser()
chain: Runnable = (
{
"question": RunnablePassthrough[str]() | passthrough,
"documents": passthrough | retriever,
"just_to_test_lambda": passthrough,
}
| prompt
| chat
| parser
)
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert isinstance(chain.first, RunnableParallel)
assert chain.middle == [prompt, chat]
assert chain.last == parser
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
parser_spy = mocker.spy(parser.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke("What is your name?", dict(callbacks=[tracer])) == [
"foo",
"bar",
]
assert prompt_spy.call_args.args[1] == {
"documents": [Document(page_content="foo"), Document(page_content="bar")],
"question": "What is your name?",
"just_to_test_lambda": "What is your name?",
}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(
content="""Context:
[Document(page_content='foo'), Document(page_content='bar')]
Question:
What is your name?"""
),
]
)
assert parser_spy.call_args.args[1] == AIMessage(content="foo, bar")
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 4
map_run = parent_run.child_runs[0]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_dict(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = (
prompt
| passthrough
| {
"chat": chat,
"llm": llm,
}
)
assert repr(chain) == snapshot
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
llm_spy = mocker.spy(llm.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == {
"chat": AIMessage(content="i'm a chatbot"),
"llm": "i'm a textbot",
}
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 2
@freeze_time("2023-01-01")
async def test_router_runnable(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
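    """Test that RouterRunnable dispatches to the chain named by the "key" input."""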
chain1: Runnable = ChatPromptTemplate.from_template(
"You are a math genius. Answer the question: {question}"
) | FakeListLLM(responses=["4"])
chain2: Runnable = ChatPromptTemplate.from_template(
"You are an english major. Answer the question: {question}"
) | FakeListLLM(responses=["2"])
router: Runnable = RouterRunnable({"math": chain1, "english": chain2})
chain: Runnable = {
"key": lambda x: x["key"],
"input": {"question": lambda x: x["question"]},
} | router
assert dumps(chain, pretty=True) == snapshot
result = chain.invoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
result = await chain.ainvoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
# Test invoke
router_spy = mocker.spy(router.__class__, "invoke")
tracer = FakeTracer()
assert (
chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer]))
== "4"
)
assert router_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "RunnableSequence" # TODO: should be RunnableRouter
assert len(router_run.child_runs) == 2
@freeze_time("2023-01-01")
async def test_higher_order_lambda_runnable(
mocker: MockerFixture, snapshot: SnapshotAssertion
) -> None:
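    """Test a lambda that returns a runnable, traced as a nested child run."""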
math_chain: Runnable = ChatPromptTemplate.from_template(
"You are a math genius. Answer the question: {question}"
) | FakeListLLM(responses=["4"])
english_chain: Runnable = ChatPromptTemplate.from_template(
"You are an english major. Answer the question: {question}"
) | FakeListLLM(responses=["2"])
input_map: Runnable = RunnableParallel(
key=lambda x: x["key"],
input={"question": lambda x: x["question"]},
)
def router(input: Dict[str, Any]) -> Runnable:
if input["key"] == "math":
return itemgetter("input") | math_chain
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
chain: Runnable = input_map | router
if sys.version_info >= (3, 9):
assert dumps(chain, pretty=True) == snapshot
result = chain.invoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = chain.batch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
result = await chain.ainvoke({"key": "math", "question": "2 + 2"})
assert result == "4"
result2 = await chain.abatch(
[{"key": "math", "question": "2 + 2"}, {"key": "english", "question": "2 + 2"}]
)
assert result2 == ["4", "2"]
# Test invoke
math_spy = mocker.spy(math_chain.__class__, "invoke")
tracer = FakeTracer()
assert (
chain.invoke({"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer]))
== "4"
)
assert math_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "router"
assert len(router_run.child_runs) == 1
math_run = router_run.child_runs[0]
assert math_run.name == "RunnableSequence"
assert len(math_run.child_runs) == 3
# Test ainvoke
async def arouter(input: Dict[str, Any]) -> Runnable:
if input["key"] == "math":
return itemgetter("input") | math_chain
elif input["key"] == "english":
return itemgetter("input") | english_chain
else:
raise ValueError(f"Unknown key: {input['key']}")
achain: Runnable = input_map | arouter
math_spy = mocker.spy(math_chain.__class__, "ainvoke")
tracer = FakeTracer()
assert (
await achain.ainvoke(
{"key": "math", "question": "2 + 2"}, dict(callbacks=[tracer])
)
== "4"
)
assert math_spy.call_args.args[1] == {
"key": "math",
"input": {"question": "2 + 2"},
}
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 2
router_run = parent_run.child_runs[1]
assert router_run.name == "arouter"
assert len(router_run.child_runs) == 1
math_run = router_run.child_runs[0]
assert math_run.name == "RunnableSequence"
assert len(math_run.child_runs) == 3
@freeze_time("2023-01-01")
def test_seq_prompt_map(mocker: MockerFixture, snapshot: SnapshotAssertion) -> None:
passthrough = mocker.Mock(side_effect=lambda x: x)
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat = FakeListChatModel(responses=["i'm a chatbot"])
llm = FakeListLLM(responses=["i'm a textbot"])
chain = (
prompt
| passthrough
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": passthrough,
}
)
assert isinstance(chain, RunnableSequence)
assert chain.first == prompt
assert chain.middle == [RunnableLambda(passthrough)]
assert isinstance(chain.last, RunnableParallel)
assert dumps(chain, pretty=True) == snapshot
# Test invoke
prompt_spy = mocker.spy(prompt.__class__, "invoke")
chat_spy = mocker.spy(chat.__class__, "invoke")
llm_spy = mocker.spy(llm.__class__, "invoke")
tracer = FakeTracer()
assert chain.invoke(
{"question": "What is your name?"}, dict(callbacks=[tracer])
) == {
"chat": AIMessage(content="i'm a chatbot"),
"llm": "i'm a textbot",
"passthrough": ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
),
}
assert prompt_spy.call_args.args[1] == {"question": "What is your name?"}
assert chat_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert llm_spy.call_args.args[1] == ChatPromptValue(
messages=[
SystemMessage(content="You are a nice assistant."),
HumanMessage(content="What is your name?"),
]
)
assert len([r for r in tracer.runs if r.parent_run_id is None]) == 1
parent_run = next(r for r in tracer.runs if r.parent_run_id is None)
assert len(parent_run.child_runs) == 3
map_run = parent_run.child_runs[2]
assert map_run.name == "RunnableParallel"
assert len(map_run.child_runs) == 3
def test_map_stream() -> None:
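    """Test streaming a map of runnables yields single-key chunks."""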
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = prompt | {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
stream = chain.stream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": prompt.invoke({"question": "What is your name?"})},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == prompt.invoke(
{"question": "What is your name?"}
)
def test_map_stream_iterator_input() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = (
prompt
| llm
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
)
stream = chain.stream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": "i"},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == "i'm a textbot"
async def test_map_astream() -> None:
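    """Test async streaming a map, plus astream_log include/exclude filters."""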
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = prompt | {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
stream = chain.astream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
async for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": prompt.invoke({"question": "What is your name?"})},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + 1
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == prompt.invoke(
{"question": "What is your name?"}
)
# Test astream_log state accumulation
final_state = None
streamed_ops = []
async for chunk in chain.astream_log({"question": "What is your name?"}):
streamed_ops.extend(chunk.ops)
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert isinstance(final_state.state["id"], str)
assert len(final_state.ops) == len(streamed_ops)
assert len(final_state.state["logs"]) == 5
assert (
final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
)
assert final_state.state["logs"]["ChatPromptTemplate"][
"final_output"
] == prompt.invoke({"question": "What is your name?"})
assert final_state.state["logs"]["RunnableParallel"]["name"] == "RunnableParallel"
assert sorted(final_state.state["logs"]) == [
"ChatPromptTemplate",
"FakeListChatModel",
"FakeStreamingListLLM",
"RunnableParallel",
"RunnablePassthrough",
]
# Test astream_log with include filters
final_state = None
async for chunk in chain.astream_log(
{"question": "What is your name?"}, include_names=["FakeListChatModel"]
):
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert len(final_state.state["logs"]) == 1
assert final_state.state["logs"]["FakeListChatModel"]["name"] == "FakeListChatModel"
# Test astream_log with exclude filters
final_state = None
async for chunk in chain.astream_log(
{"question": "What is your name?"}, exclude_names=["FakeListChatModel"]
):
if final_state is None:
final_state = chunk
else:
final_state += chunk
final_state = cast(RunLog, final_state)
assert final_state.state["final_output"] == final_value
assert len(final_state.state["streamed_output"]) == len(streamed_chunks)
assert len(final_state.state["logs"]) == 4
assert (
final_state.state["logs"]["ChatPromptTemplate"]["name"] == "ChatPromptTemplate"
)
assert final_state.state["logs"]["ChatPromptTemplate"]["final_output"] == (
prompt.invoke({"question": "What is your name?"})
)
assert final_state.state["logs"]["RunnableParallel"]["name"] == "RunnableParallel"
assert sorted(final_state.state["logs"]) == [
"ChatPromptTemplate",
"FakeStreamingListLLM",
"RunnableParallel",
"RunnablePassthrough",
]
async def test_map_astream_iterator_input() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
chat_res = "i'm a chatbot"
# sleep to better simulate a real stream
chat = FakeListChatModel(responses=[chat_res], sleep=0.01)
llm_res = "i'm a textbot"
# sleep to better simulate a real stream
llm = FakeStreamingListLLM(responses=[llm_res], sleep=0.01)
chain: Runnable = (
prompt
| llm
| {
"chat": chat.bind(stop=["Thought:"]),
"llm": llm,
"passthrough": RunnablePassthrough(),
}
)
stream = chain.astream({"question": "What is your name?"})
final_value = None
streamed_chunks = []
async for chunk in stream:
streamed_chunks.append(chunk)
if final_value is None:
final_value = chunk
else:
final_value += chunk
assert streamed_chunks[0] in [
{"passthrough": "i"},
{"llm": "i"},
{"chat": AIMessageChunk(content="i")},
]
assert len(streamed_chunks) == len(chat_res) + len(llm_res) + len(llm_res)
assert all(len(c.keys()) == 1 for c in streamed_chunks)
assert final_value is not None
assert final_value.get("chat").content == "i'm a chatbot"
assert final_value.get("llm") == "i'm a textbot"
assert final_value.get("passthrough") == llm_res
def test_with_config_with_config() -> None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(
llm.with_config({"metadata": {"a": "b"}}).with_config(tags=["a-tag"])
) == dumpd(llm.with_config({"metadata": {"a": "b"}, "tags": ["a-tag"]}))
def test_metadata_is_merged() -> None:
"""Test metadata and tags defined in with_config and at are merged/concatend."""
foo = RunnableLambda(lambda x: x).with_config({"metadata": {"my_key": "my_value"}})
expected_metadata = {
"my_key": "my_value",
"my_other_key": "my_other_value",
}
with collect_runs() as cb:
foo.invoke("hi", {"metadata": {"my_other_key": "my_other_value"}})
run = cb.traced_runs[0]
assert run.extra is not None
assert run.extra["metadata"] == expected_metadata
def test_tags_are_appended() -> None:
"""Test tags from with_config are concatenated with those in invocation."""
foo = RunnableLambda(lambda x: x).with_config({"tags": ["my_key"]})
with collect_runs() as cb:
foo.invoke("hi", {"tags": ["invoked_key"]})
run = cb.traced_runs[0]
assert isinstance(run.tags, list)
assert sorted(run.tags) == sorted(["my_key", "invoked_key"])
def test_bind_bind() -> None:
llm = FakeListLLM(responses=["i'm a textbot"])
assert dumpd(
llm.bind(stop=["Thought:"], one="two").bind(
stop=["Observation:"], hello="world"
)
) == dumpd(llm.bind(stop=["Observation:"], one="two", hello="world"))
def test_deep_stream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
stream = chain.stream({"question": "What up"})
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
chunks = []
for chunk in (chain | RunnablePassthrough()).stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
def test_deep_stream_assign() -> None:
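    """Test streaming RunnablePassthrough.assign, including key shadowing."""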
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = prompt | llm | {"str": StrOutputParser()}
stream = chain.stream({"question": "What up"})
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert add(chunks) == {"str": "foo-lish"}
chain_with_assign = chain | RunnablePassthrough.assign(
hello=itemgetter("str") | llm
)
assert chain_with_assign.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
for chunk in chain_with_assign.stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") * 2
assert chunks == [
# first stream passthrough input chunks
{"str": "f"},
{"str": "o"},
{"str": "o"},
{"str": "-"},
{"str": "l"},
{"str": "i"},
{"str": "s"},
{"str": "h"},
# then stream assign output chunks
{"hello": "f"},
{"hello": "o"},
{"hello": "o"},
{"hello": "-"},
{"hello": "l"},
{"hello": "i"},
{"hello": "s"},
{"hello": "h"},
]
assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
assert chain_with_assign.invoke({"question": "What up"}) == {
"str": "foo-lish",
"hello": "foo-lish",
}
chain_with_assign_shadow = chain | RunnablePassthrough.assign(
str=lambda _: "shadow",
hello=itemgetter("str") | llm,
)
assert chain_with_assign_shadow.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign_shadow.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
for chunk in chain_with_assign_shadow.stream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") + 1
assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
assert chain_with_assign_shadow.invoke({"question": "What up"}) == {
"str": "shadow",
"hello": "foo-lish",
}
async def test_deep_astream() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
stream = chain.astream({"question": "What up"})
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
chunks = []
async for chunk in (chain | RunnablePassthrough()).astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
async def test_deep_astream_assign() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = prompt | llm | {"str": StrOutputParser()}
stream = chain.astream({"question": "What up"})
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert add(chunks) == {"str": "foo-lish"}
chain_with_assign = chain | RunnablePassthrough.assign(
hello=itemgetter("str") | llm,
)
assert chain_with_assign.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
async for chunk in chain_with_assign.astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") * 2
assert chunks == [
# first stream passthrough input chunks
{"str": "f"},
{"str": "o"},
{"str": "o"},
{"str": "-"},
{"str": "l"},
{"str": "i"},
{"str": "s"},
{"str": "h"},
# then stream assign output chunks
{"hello": "f"},
{"hello": "o"},
{"hello": "o"},
{"hello": "-"},
{"hello": "l"},
{"hello": "i"},
{"hello": "s"},
{"hello": "h"},
]
assert add(chunks) == {"str": "foo-lish", "hello": "foo-lish"}
assert await chain_with_assign.ainvoke({"question": "What up"}) == {
"str": "foo-lish",
"hello": "foo-lish",
}
chain_with_assign_shadow = chain | RunnablePassthrough.assign(
str=lambda _: "shadow",
hello=itemgetter("str") | llm,
)
assert chain_with_assign_shadow.input_schema.schema() == {
"title": "PromptInput",
"type": "object",
"properties": {"question": {"title": "Question", "type": "string"}},
}
assert chain_with_assign_shadow.output_schema.schema() == {
"title": "RunnableAssignOutput",
"type": "object",
"properties": {
"str": {"title": "Str"},
"hello": {"title": "Hello", "type": "string"},
},
}
chunks = []
async for chunk in chain_with_assign_shadow.astream({"question": "What up"}):
chunks.append(chunk)
assert len(chunks) == len("foo-lish") + 1
assert add(chunks) == {"str": "shadow", "hello": "foo-lish"}
assert await chain_with_assign_shadow.ainvoke({"question": "What up"}) == {
"str": "shadow",
"hello": "foo-lish",
}
def test_runnable_sequence_transform() -> None:
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = llm | StrOutputParser()
stream = chain.transform(llm.stream("Hi there!"))
chunks = []
for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
async def test_runnable_sequence_atransform() -> None:
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain: Runnable = llm | StrOutputParser()
stream = chain.atransform(llm.astream("Hi there!"))
chunks = []
async for chunk in stream:
chunks.append(chunk)
assert len(chunks) == len("foo-lish")
assert "".join(chunks) == "foo-lish"
@pytest.fixture()
def llm_with_fallbacks() -> RunnableWithFallbacks:
error_llm = FakeListLLM(responses=["foo"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
return error_llm.with_fallbacks([pass_llm])
@pytest.fixture()
def llm_with_multi_fallbacks() -> RunnableWithFallbacks:
error_llm = FakeListLLM(responses=["foo"], i=1)
error_llm_2 = FakeListLLM(responses=["baz"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
return error_llm.with_fallbacks([error_llm_2, pass_llm])
@pytest.fixture()
def llm_chain_with_fallbacks() -> Runnable:
error_llm = FakeListLLM(responses=["foo"], i=1)
pass_llm = FakeListLLM(responses=["bar"])
prompt = PromptTemplate.from_template("what did baz say to {buz}")
return RunnableParallel({"buz": lambda x: x}) | (prompt | error_llm).with_fallbacks(
[prompt | pass_llm]
)
@pytest.mark.parametrize(
"runnable",
["llm_with_fallbacks", "llm_with_multi_fallbacks", "llm_chain_with_fallbacks"],
)
async def test_llm_with_fallbacks(
runnable: RunnableWithFallbacks, request: Any, snapshot: SnapshotAssertion
) -> None:
runnable = request.getfixturevalue(runnable)
assert runnable.invoke("hello") == "bar"
assert runnable.batch(["hi", "hey", "bye"]) == ["bar"] * 3
assert list(runnable.stream("hello")) == ["bar"]
assert await runnable.ainvoke("hello") == "bar"
assert await runnable.abatch(["hi", "hey", "bye"]) == ["bar"] * 3
assert list(await runnable.ainvoke("hello")) == list("bar")
if sys.version_info >= (3, 9):
assert dumps(runnable, pretty=True) == snapshot
class FakeSplitIntoListParser(BaseOutputParser[List[str]]):
"""Parse the output of an LLM call to a comma-separated list."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether or not the class is serializable."""
return True
def get_format_instructions(self) -> str:
return (
"Your response should be a list of comma separated values, "
"eg: `foo, bar, baz`"
)
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
def test_each_simple() -> None:
"""Test that each() works with a simple runnable."""
parser = FakeSplitIntoListParser()
assert parser.invoke("first item, second item") == ["first item", "second item"]
assert parser.map().invoke(["a, b", "c"]) == [["a", "b"], ["c"]]
assert parser.map().map().invoke([["a, b", "c"], ["c, e"]]) == [
[["a", "b"], ["c"]],
[["c", "e"]],
]
def test_each(snapshot: SnapshotAssertion) -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
first_llm = FakeStreamingListLLM(responses=["first item, second item, third item"])
parser = FakeSplitIntoListParser()
second_llm = FakeStreamingListLLM(responses=["this", "is", "a", "test"])
chain = prompt | first_llm | parser | second_llm.map()
assert dumps(chain, pretty=True) == snapshot
output = chain.invoke({"question": "What up"})
assert output == ["this", "is", "a"]
assert (parser | second_llm.map()).invoke("first item, second item") == [
"test",
"this",
]
def test_recursive_lambda() -> None:
def _simple_recursion(x: int) -> Union[int, Runnable]:
if x < 10:
return RunnableLambda(lambda *args: _simple_recursion(x + 1))
else:
return x
runnable = RunnableLambda(_simple_recursion)
assert runnable.invoke(5) == 10
with pytest.raises(RecursionError):
runnable.invoke(0, {"recursion_limit": 9})
def test_retrying(mocker: MockerFixture) -> None:
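    """Test that with_retry retries only configured exceptions and failing inputs."""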
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
elif x == 2:
raise RuntimeError("x is 2")
else:
return x
_lambda_mock = mocker.Mock(side_effect=_lambda)
runnable = RunnableLambda(_lambda_mock)
with pytest.raises(ValueError):
runnable.invoke(1)
assert _lambda_mock.call_count == 1
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
runnable.with_retry(
stop_after_attempt=2,
retry_if_exception_type=(ValueError,),
).invoke(1)
assert _lambda_mock.call_count == 2 # retried
_lambda_mock.reset_mock()
with pytest.raises(RuntimeError):
runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).invoke(2)
assert _lambda_mock.call_count == 1 # did not retry
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).batch([1, 2, 0])
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
_lambda_mock.reset_mock()
output = runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).batch([1, 2, 0], return_exceptions=True)
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
assert len(output) == 3
assert isinstance(output[0], ValueError)
assert isinstance(output[1], RuntimeError)
assert output[2] == 0
_lambda_mock.reset_mock()
async def test_async_retrying(mocker: MockerFixture) -> None:
def _lambda(x: int) -> Union[int, Runnable]:
if x == 1:
raise ValueError("x is 1")
elif x == 2:
raise RuntimeError("x is 2")
else:
return x
_lambda_mock = mocker.Mock(side_effect=_lambda)
runnable = RunnableLambda(_lambda_mock)
with pytest.raises(ValueError):
await runnable.ainvoke(1)
assert _lambda_mock.call_count == 1
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError, KeyError),
).ainvoke(1)
assert _lambda_mock.call_count == 2 # retried
_lambda_mock.reset_mock()
with pytest.raises(RuntimeError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).ainvoke(2)
assert _lambda_mock.call_count == 1 # did not retry
_lambda_mock.reset_mock()
with pytest.raises(ValueError):
await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).abatch([1, 2, 0])
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
_lambda_mock.reset_mock()
output = await runnable.with_retry(
stop_after_attempt=2,
wait_exponential_jitter=False,
retry_if_exception_type=(ValueError,),
).abatch([1, 2, 0], return_exceptions=True)
# 3rd input isn't retried because it succeeded
assert _lambda_mock.call_count == 3 + 2
assert len(output) == 3
assert isinstance(output[0], ValueError)
assert isinstance(output[1], RuntimeError)
assert output[2] == 0
_lambda_mock.reset_mock()
@freeze_time("2023-01-01")
def test_seq_batch_return_exceptions(mocker: MockerFixture) -> None:
class ControlledExceptionRunnable(Runnable[str, str]):
def __init__(self, fail_starts_with: str) -> None:
self.fail_starts_with = fail_starts_with
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
raise NotImplementedError()
def _batch(
self,
inputs: List[str],
) -> List:
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + "a")
return outputs
def batch(
self,
inputs: List[str],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[str]:
return self._batch_with_config(
self._batch,
inputs,
config,
return_exceptions=return_exceptions,
**kwargs,
)
chain = (
ControlledExceptionRunnable("bux")
| ControlledExceptionRunnable("bar")
| ControlledExceptionRunnable("baz")
| ControlledExceptionRunnable("foo")
)
assert isinstance(chain, RunnableSequence)
# Test batch
with pytest.raises(ValueError):
chain.batch(["foo", "bar", "baz", "qux"])
spy = mocker.spy(ControlledExceptionRunnable, "batch")
tracer = FakeTracer()
inputs = ["foo", "bar", "baz", "qux"]
outputs = chain.batch(inputs, dict(callbacks=[tracer]), return_exceptions=True)
assert len(outputs) == 4
assert isinstance(outputs[0], ValueError)
assert isinstance(outputs[1], ValueError)
assert isinstance(outputs[2], ValueError)
assert outputs[3] == "quxaaaa"
assert spy.call_count == 4
inputs_to_batch = [c[0][1] for c in spy.call_args_list]
assert inputs_to_batch == [
# inputs to sequence step 0
# same as inputs to sequence.batch()
["foo", "bar", "baz", "qux"],
# inputs to sequence step 1
# == outputs of sequence step 0 as no exceptions were raised
["fooa", "bara", "baza", "quxa"],
# inputs to sequence step 2
# 'bar' was dropped as it raised an exception in step 1
["fooaa", "bazaa", "quxaa"],
# inputs to sequence step 3
# 'baz' was dropped as it raised an exception in step 2
["fooaaa", "quxaaa"],
]
parent_runs = sorted(
(r for r in tracer.runs if r.parent_run_id is None),
key=lambda run: inputs.index(run.inputs["input"]),
)
assert len(parent_runs) == 4
parent_run_foo = parent_runs[0]
assert parent_run_foo.inputs["input"] == "foo"
assert parent_run_foo.error == repr(ValueError())
assert len(parent_run_foo.child_runs) == 4
assert [r.error for r in parent_run_foo.child_runs] == [
None,
None,
None,
repr(ValueError()),
]
parent_run_bar = parent_runs[1]
assert parent_run_bar.inputs["input"] == "bar"
assert parent_run_bar.error == repr(ValueError())
assert len(parent_run_bar.child_runs) == 2
assert [r.error for r in parent_run_bar.child_runs] == [
None,
repr(ValueError()),
]
parent_run_baz = parent_runs[2]
assert parent_run_baz.inputs["input"] == "baz"
assert parent_run_baz.error == repr(ValueError())
assert len(parent_run_baz.child_runs) == 3
assert [r.error for r in parent_run_baz.child_runs] == [
None,
None,
repr(ValueError()),
]
parent_run_qux = parent_runs[3]
assert parent_run_qux.inputs["input"] == "qux"
assert parent_run_qux.error is None
assert parent_run_qux.outputs is not None
assert parent_run_qux.outputs["output"] == "quxaaaa"
assert len(parent_run_qux.child_runs) == 4
assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None]
@freeze_time("2023-01-01")
async def test_seq_abatch_return_exceptions(mocker: MockerFixture) -> None:
class ControlledExceptionRunnable(Runnable[str, str]):
def __init__(self, fail_starts_with: str) -> None:
self.fail_starts_with = fail_starts_with
def invoke(self, input: Any, config: Optional[RunnableConfig] = None) -> Any:
raise NotImplementedError()
async def _abatch(
self,
inputs: List[str],
) -> List:
outputs: List[Any] = []
for input in inputs:
if input.startswith(self.fail_starts_with):
outputs.append(ValueError())
else:
outputs.append(input + "a")
return outputs
async def abatch(
self,
inputs: List[str],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[str]:
return await self._abatch_with_config(
self._abatch,
inputs,
config,
return_exceptions=return_exceptions,
**kwargs,
)
chain = (
ControlledExceptionRunnable("bux")
| ControlledExceptionRunnable("bar")
| ControlledExceptionRunnable("baz")
| ControlledExceptionRunnable("foo")
)
assert isinstance(chain, RunnableSequence)
# Test abatch
with pytest.raises(ValueError):
await chain.abatch(["foo", "bar", "baz", "qux"])
spy = mocker.spy(ControlledExceptionRunnable, "abatch")
tracer = FakeTracer()
inputs = ["foo", "bar", "baz", "qux"]
outputs = await chain.abatch(
inputs, dict(callbacks=[tracer]), return_exceptions=True
)
assert len(outputs) == 4
assert isinstance(outputs[0], ValueError)
assert isinstance(outputs[1], ValueError)
assert isinstance(outputs[2], ValueError)
assert outputs[3] == "quxaaaa"
assert spy.call_count == 4
inputs_to_batch = [c[0][1] for c in spy.call_args_list]
assert inputs_to_batch == [
# inputs to sequence step 0
# same as inputs to sequence.batch()
["foo", "bar", "baz", "qux"],
# inputs to sequence step 1
# == outputs of sequence step 0 as no exceptions were raised
["fooa", "bara", "baza", "quxa"],
# inputs to sequence step 2
# 'bar' was dropped as it raised an exception in step 1
["fooaa", "bazaa", "quxaa"],
# inputs to sequence step 3
# 'baz' was dropped as it raised an exception in step 2
["fooaaa", "quxaaa"],
]
parent_runs = sorted(
(r for r in tracer.runs if r.parent_run_id is None),
key=lambda run: inputs.index(run.inputs["input"]),
)
assert len(parent_runs) == 4
parent_run_foo = parent_runs[0]
assert parent_run_foo.inputs["input"] == "foo"
assert parent_run_foo.error == repr(ValueError())
assert len(parent_run_foo.child_runs) == 4
assert [r.error for r in parent_run_foo.child_runs] == [
None,
None,
None,
repr(ValueError()),
]
parent_run_bar = parent_runs[1]
assert parent_run_bar.inputs["input"] == "bar"
assert parent_run_bar.error == repr(ValueError())
assert len(parent_run_bar.child_runs) == 2
assert [r.error for r in parent_run_bar.child_runs] == [
None,
repr(ValueError()),
]
parent_run_baz = parent_runs[2]
assert parent_run_baz.inputs["input"] == "baz"
assert parent_run_baz.error == repr(ValueError())
assert len(parent_run_baz.child_runs) == 3
assert [r.error for r in parent_run_baz.child_runs] == [
None,
None,
repr(ValueError()),
]
parent_run_qux = parent_runs[3]
assert parent_run_qux.inputs["input"] == "qux"
assert parent_run_qux.error is None
assert parent_run_qux.outputs is not None
assert parent_run_qux.outputs["output"] == "quxaaaa"
assert len(parent_run_qux.child_runs) == 4
assert [r.error for r in parent_run_qux.child_runs] == [None, None, None, None]
def test_runnable_branch_init() -> None:
"""Verify that runnable branch gets initialized properly."""
add = RunnableLambda(lambda x: x + 1)
condition = RunnableLambda(lambda x: x > 0)
# Test failure with less than 2 branches
with pytest.raises(ValueError):
RunnableBranch((condition, add))
# Test failure with less than 2 branches
with pytest.raises(ValueError):
RunnableBranch(condition)
@pytest.mark.parametrize(
"branches",
[
[
(RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)),
RunnableLambda(lambda x: x - 1),
],
[
(RunnableLambda(lambda x: x > 0), RunnableLambda(lambda x: x + 1)),
(RunnableLambda(lambda x: x > 5), RunnableLambda(lambda x: x + 1)),
RunnableLambda(lambda x: x - 1),
],
[
(lambda x: x > 0, lambda x: x + 1),
(lambda x: x > 5, lambda x: x + 1),
lambda x: x - 1,
],
],
)
def test_runnable_branch_init_coercion(branches: Sequence[Any]) -> None:
"""Verify that runnable branch gets initialized properly."""
runnable = RunnableBranch[int, int](*branches)
for branch in runnable.branches:
condition, body = branch
assert isinstance(condition, Runnable)
assert isinstance(body, Runnable)
assert isinstance(runnable.default, Runnable)
assert runnable.input_schema.schema() == {"title": "RunnableBranchInput"}
def test_runnable_branch_invoke_call_counts(mocker: MockerFixture) -> None:
"""Verify that runnables are invoked only when necessary."""
# Test with single branch
add = RunnableLambda(lambda x: x + 1)
sub = RunnableLambda(lambda x: x - 1)
condition = RunnableLambda(lambda x: x > 0)
spy = mocker.spy(condition, "invoke")
add_spy = mocker.spy(add, "invoke")
branch = RunnableBranch[int, int]((condition, add), (condition, add), sub)
assert spy.call_count == 0
assert add_spy.call_count == 0
assert branch.invoke(1) == 2
assert add_spy.call_count == 1
assert spy.call_count == 1
assert branch.invoke(2) == 3
assert spy.call_count == 2
assert add_spy.call_count == 2
assert branch.invoke(-3) == -4
# Should fall through to default branch with condition being evaluated twice!
assert spy.call_count == 4
# Add should not be invoked
assert add_spy.call_count == 2
def test_runnable_branch_invoke() -> None:
# Test with single branch
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
# mypy cannot infer types from the lambda
(lambda x: x > 0 and x < 5, lambda x: x + 1), # type: ignore[misc]
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert branch.invoke(1) == 2
assert branch.invoke(10) == 100
assert branch.invoke(0) == -1
# Should raise an exception
with pytest.raises(ValueError):
branch.invoke(1000)
def test_runnable_branch_batch() -> None:
"""Test batch variant."""
# Test with single branch
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert branch.batch([1, 10, 0]) == [2, 100, -1]
async def test_runnable_branch_ainvoke() -> None:
"""Test async variant of invoke."""
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert await branch.ainvoke(1) == 2
assert await branch.ainvoke(10) == 100
assert await branch.ainvoke(0) == -1
# Verify that the async variant is used if available
async def condition(x: int) -> bool:
return x > 0
async def add(x: int) -> int:
return x + 1
async def sub(x: int) -> int:
return x - 1
branch = RunnableBranch[int, int]((condition, add), sub)
assert await branch.ainvoke(1) == 2
assert await branch.ainvoke(-10) == -11
def test_runnable_branch_invoke_callbacks() -> None:
"""Verify that callbacks are correctly used in invoke."""
tracer = FakeTracer()
def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
lambda x: x - 1,
)
assert branch.invoke(1, config={"callbacks": [tracer]}) == 0
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {"output": 0}
# Check that the chain on end is invoked
with pytest.raises(ValueError):
branch.invoke(1000, config={"callbacks": [tracer]})
assert len(tracer.runs) == 2
assert tracer.runs[1].error == "ValueError('x is too large')"
assert tracer.runs[1].outputs is None
async def test_runnable_branch_ainvoke_callbacks() -> None:
"""Verify that callbacks are invoked correctly in ainvoke."""
tracer = FakeTracer()
async def raise_value_error(x: int) -> int:
"""Raise a value error."""
raise ValueError("x is too large")
branch = RunnableBranch[int, int](
(lambda x: x > 100, raise_value_error),
lambda x: x - 1,
)
assert await branch.ainvoke(1, config={"callbacks": [tracer]}) == 0
assert len(tracer.runs) == 1
assert tracer.runs[0].error is None
assert tracer.runs[0].outputs == {"output": 0}
# Check that the chain on end is invoked
with pytest.raises(ValueError):
await branch.ainvoke(1000, config={"callbacks": [tracer]})
assert len(tracer.runs) == 2
assert tracer.runs[1].error == "ValueError('x is too large')"
assert tracer.runs[1].outputs is None
async def test_runnable_branch_abatch() -> None:
"""Test async variant of invoke."""
branch = RunnableBranch[int, int](
(lambda x: x > 0 and x < 5, lambda x: x + 1),
(lambda x: x > 5, lambda x: x * 10),
lambda x: x - 1,
)
assert await branch.abatch([1, 10, 0]) == [2, 100, -1]
@pytest.mark.skipif(
sys.version_info < (3, 9), reason="Requires python version >= 3.9 to run."
)
def test_representation_of_runnables() -> None:
"""Test representation of runnables."""
runnable = RunnableLambda(lambda x: x * 2)
assert repr(runnable) == "RunnableLambda(lambda x: x * 2)"
def f(x: int) -> int:
"""Return 2."""
return 2
assert repr(RunnableLambda(func=f)) == "RunnableLambda(...)"
async def af(x: int) -> int:
"""Return 2."""
return 2
assert repr(RunnableLambda(func=f, afunc=af)) == "RunnableLambda(...)"
assert repr(
RunnableLambda(lambda x: x + 2)
| {
"a": RunnableLambda(lambda x: x * 2),
"b": RunnableLambda(lambda x: x * 3),
}
) == (
"RunnableLambda(...)\n"
"| {\n"
" a: RunnableLambda(...),\n"
" b: RunnableLambda(...)\n"
" }"
), "repr where code string contains multiple lambdas gives up"
async def test_tool_from_runnable() -> None:
prompt = (
SystemMessagePromptTemplate.from_template("You are a nice assistant.")
+ "{question}"
)
llm = FakeStreamingListLLM(responses=["foo-lish"])
chain = prompt | llm | StrOutputParser()
chain_tool = tool("chain_tool", chain)
assert isinstance(chain_tool, BaseTool)
assert chain_tool.name == "chain_tool"
assert chain_tool.run({"question": "What up"}) == chain.invoke(
{"question": "What up"}
)
assert await chain_tool.arun({"question": "What up"}) == await chain.ainvoke(
{"question": "What up"}
)
assert chain_tool.description.endswith(repr(chain))
assert chain_tool.args_schema.schema() == chain.input_schema.schema()
assert chain_tool.args_schema.schema() == {
"properties": {"question": {"title": "Question", "type": "string"}},
"title": "PromptInput",
"type": "object",
}
async def test_runnable_gen() -> None:
"""Test that a generator can be used as a runnable."""
def gen(input: Iterator[Any]) -> Iterator[int]:
yield 1
yield 2
yield 3
runnable = RunnableGenerator(gen)
assert runnable.input_schema.schema() == {"title": "RunnableGeneratorInput"}
assert runnable.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
assert runnable.invoke(None) == 6
assert list(runnable.stream(None)) == [1, 2, 3]
assert runnable.batch([None, None]) == [6, 6]
async def agen(input: AsyncIterator[Any]) -> AsyncIterator[int]:
yield 1
yield 2
yield 3
arunnable = RunnableGenerator(agen)
assert await arunnable.ainvoke(None) == 6
assert [p async for p in arunnable.astream(None)] == [1, 2, 3]
assert await arunnable.abatch([None, None]) == [6, 6]
async def test_runnable_gen_transform() -> None:
"""Test that a generator can be used as a runnable."""
def gen_indexes(length_iter: Iterator[int]) -> Iterator[int]:
for i in range(next(length_iter)):
yield i
async def agen_indexes(length_iter: AsyncIterator[int]) -> AsyncIterator[int]:
async for length in length_iter:
for i in range(length):
yield i
def plus_one(input: Iterator[int]) -> Iterator[int]:
for i in input:
yield i + 1
async def aplus_one(input: AsyncIterator[int]) -> AsyncIterator[int]:
async for i in input:
yield i + 1
chain: Runnable = RunnableGenerator(gen_indexes, agen_indexes) | plus_one
achain = RunnableGenerator(gen_indexes, agen_indexes) | aplus_one
assert chain.input_schema.schema() == {
"title": "RunnableGeneratorInput",
"type": "integer",
}
assert chain.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
assert achain.input_schema.schema() == {
"title": "RunnableGeneratorInput",
"type": "integer",
}
assert achain.output_schema.schema() == {
"title": "RunnableGeneratorOutput",
"type": "integer",
}
assert list(chain.stream(3)) == [1, 2, 3]
assert [p async for p in achain.astream(4)] == [1, 2, 3, 4]
def test_with_config_callbacks() -> None:
result = RunnableLambda(lambda x: x).with_config({"callbacks": []})
# Bugfix from version 0.0.325
# ConfigError: field "callbacks" not yet prepared so type is still a ForwardRef,
# you might need to call RunnableConfig.update_forward_refs().
assert isinstance(result, RunnableBinding)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,326 | Enhancing iMessageChatLoader to prevent skipping messages with extractable content | ### Feature request
Extract message content for messages that iMessageChatLoader currently skips because their `text` field is null, even though the content is present in other fields.
### Motivation
Due to a change in iMessage's database schema introduced with macOS Ventura, newer messages now have their content encoded in the `attributedBody` field, while the `text` field is null.
This issue was originally raised by @idvorkin in Issue #10680.
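A minimal decoding sketch of the idea (the helper name and byte-layout details are our assumptions about the typedstream blob, not the final implementation):

```python
def parse_attributed_body(blob: bytes) -> str:
    """Best-effort extraction of the visible text from an `attributedBody` blob.

    Assumes the layout seen in Ventura databases: the text follows a literal
    ``NSString`` marker, prefixed by a length byte (or by 0x81 plus a
    little-endian uint16 for texts of 128+ characters).
    """
    if not blob or b"NSString" not in blob:
        return ""
    try:
        content = blob.split(b"NSString", 1)[1][5:]  # skip 5 bytes of type info
        if content[0] == 0x81:
            length, start = int.from_bytes(content[1:3], "little"), 3
        else:
            length, start = content[0], 1
        return content[start : start + length].decode("utf-8", errors="ignore")
    except IndexError:
        return ""
```

The loader could fall back to such a helper whenever `message.text` is null but `message.attributedBody` is populated.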
### Your contribution
We intend to submit a pull request for this issue close to the end of November. | https://github.com/langchain-ai/langchain/issues/13326 | https://github.com/langchain-ai/langchain/pull/13634 | e5256bcb69bde036cd02ca4871333451efc11833 | 32d794f5a3dac334d258f11a1b4b7d727a3549a4 | "2023-11-14T04:08:46Z" | python | "2023-11-28T20:45:43Z" | libs/langchain/langchain/chat_loaders/imessage.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Iterator, List, Optional, Union
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import HumanMessage
from langchain.chat_loaders.base import BaseChatLoader
if TYPE_CHECKING:
import sqlite3
class IMessageChatLoader(BaseChatLoader):
"""Load chat sessions from the `iMessage` chat.db SQLite file.
It only works on macOS when you have iMessage enabled and have the chat.db file.
The chat.db file is likely located at ~/Library/Messages/chat.db. However, your
terminal may not have permission to access this file. To resolve this, you can
copy the file to a different location, change the permissions of the file, or
grant full disk access for your terminal emulator
in System Settings > Security and Privacy > Full Disk Access.
"""
def __init__(self, path: Optional[Union[str, Path]] = None):
"""
Initialize the IMessageChatLoader.
Args:
path (str or Path, optional): Path to the chat.db SQLite file.
Defaults to None, in which case the default path
~/Library/Messages/chat.db will be used.
"""
if path is None:
path = Path.home() / "Library" / "Messages" / "chat.db"
self.db_path = path if isinstance(path, Path) else Path(path)
if not self.db_path.exists():
raise FileNotFoundError(f"File {self.db_path} not found")
try:
import sqlite3 # noqa: F401
except ImportError as e:
raise ImportError(
"The sqlite3 module is required to load iMessage chats.\n"
"Please install it with `pip install pysqlite3`"
) from e
def _load_single_chat_session(
self, cursor: "sqlite3.Cursor", chat_id: int
) -> ChatSession:
"""
Load a single chat session from the iMessage chat.db.
Args:
cursor: SQLite cursor object.
chat_id (int): ID of the chat session to load.
Returns:
ChatSession: Loaded chat session.
"""
results: List[HumanMessage] = []
query = """
SELECT message.date, handle.id, message.text
FROM message
JOIN chat_message_join ON message.ROWID = chat_message_join.message_id
JOIN handle ON message.handle_id = handle.ROWID
WHERE chat_message_join.chat_id = ?
ORDER BY message.date ASC;
"""
cursor.execute(query, (chat_id,))
messages = cursor.fetchall()
for date, sender, text in messages:
if text: # Skip empty messages
results.append(
HumanMessage(
role=sender,
content=text,
additional_kwargs={
"message_time": date,
"sender": sender,
},
)
)
return ChatSession(messages=results)
def lazy_load(self) -> Iterator[ChatSession]:
"""
Lazy load the chat sessions from the iMessage chat.db
and yield them in the required format.
Yields:
ChatSession: Loaded chat session.
"""
import sqlite3
try:
conn = sqlite3.connect(self.db_path)
except sqlite3.OperationalError as e:
raise ValueError(
f"Could not open iMessage DB file {self.db_path}.\n"
"Make sure your terminal emulator has disk access to this file.\n"
" You can either copy the DB file to an accessible location"
" or grant full disk access for your terminal emulator."
" You can grant full disk access for your terminal emulator"
" in System Settings > Security and Privacy > Full Disk Access."
) from e
cursor = conn.cursor()
# Fetch the list of chat IDs sorted by time (most recent first)
query = """SELECT chat_id
FROM message
JOIN chat_message_join ON message.ROWID = chat_message_join.message_id
GROUP BY chat_id
ORDER BY MAX(date) DESC;"""
cursor.execute(query)
chat_ids = [row[0] for row in cursor.fetchall()]
for chat_id in chat_ids:
yield self._load_single_chat_session(cursor, chat_id)
conn.close()
|
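A short usage sketch for the loader above — the path is a hypothetical copy of chat.db placed somewhere the terminal can read:

```python
from pathlib import Path

from langchain.chat_loaders.imessage import IMessageChatLoader

# Hypothetical copy of ~/Library/Messages/chat.db made to avoid the
# Full Disk Access requirement described in the class docstring.
db_copy = Path.home() / "Desktop" / "chat.db"

loader = IMessageChatLoader(path=db_copy)
for session in loader.lazy_load():
    for message in session["messages"]:
        print(message.additional_kwargs["sender"], message.content)
```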
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,326 | Enhancing iMessageChatLoader to prevent skipping messages with extractable content | ### Feature request
Extract message content for messages that iMessageChatLoader currently skips because their `text` field is null, even though the content is present in other fields.
### Motivation
Due to a change in iMessage's database schema introduced with macOS Ventura, newer messages now have their content encoded in the `attributedBody` field, while the `text` field is null.
This issue was originally raised by @idvorkin in Issue #10680.
### Your contribution
We intend to submit a pull request for this issue close to the end of November. | https://github.com/langchain-ai/langchain/issues/13326 | https://github.com/langchain-ai/langchain/pull/13634 | e5256bcb69bde036cd02ca4871333451efc11833 | 32d794f5a3dac334d258f11a1b4b7d727a3549a4 | "2023-11-14T04:08:46Z" | python | "2023-11-28T20:45:43Z" | libs/langchain/tests/unit_tests/chat_loaders/data/imessage_chat.db | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 13,326 | Enhancing iMessageChatLoader to prevent skipping messages with extractable content | ### Feature request
Extract message content for messages that iMessageChatLoader currently skips because their `text` field is null, even though the content is present in other fields.
### Motivation
Due to a change in iMessage's database schema introduced with macOS Ventura, newer messages now have their content encoded in the `attributedBody` field, while the `text` field is null.
This issue was originally raised by @idvorkin in Issue #10680.
### Your contribution
We intend to submit a pull request for this issue close to the end of November. | https://github.com/langchain-ai/langchain/issues/13326 | https://github.com/langchain-ai/langchain/pull/13634 | e5256bcb69bde036cd02ca4871333451efc11833 | 32d794f5a3dac334d258f11a1b4b7d727a3549a4 | "2023-11-14T04:08:46Z" | python | "2023-11-28T20:45:43Z" | libs/langchain/tests/unit_tests/chat_loaders/test_imessage.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 10,674 | HuggingFace Data Loader fails when context is not str | ### System Info
langchain 0.0.285
python 3.11.4
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [X] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
Try to load https://huggingface.co/datasets/hotpot_qa/viewer/fullwiki/validation
from langchain.document_loaders import HuggingFaceDatasetLoader
dataset_name = "hotpot_qa"
page_content_column = "context"
name = "fullwiki"
loader = HuggingFaceDatasetLoader(dataset_name, page_content_column, name)
docs = loader.load()
---------------------------------------------------------------------------
ValidationError Traceback (most recent call last)
/Users/deanchanter/Documents/GitHub/comma-chameleons/hello_doc_read.ipynb Cell 1 line 8
4 name = "fullwiki"
7 loader = HuggingFaceDatasetLoader(dataset_name, page_content_column, name)
----> 8 docs = loader.load()
File ~/Documents/GitHub/comma-chameleons/env/lib/python3.11/site-packages/langchain/document_loaders/hugging_face_dataset.py:87, in HuggingFaceDatasetLoader.load(self)
85 def load(self) -> List[Document]:
86 """Load documents."""
---> 87 return list(self.lazy_load())
File ~/Documents/GitHub/comma-chameleons/env/lib/python3.11/site-packages/langchain/document_loaders/hugging_face_dataset.py:76, in HuggingFaceDatasetLoader.lazy_load(self)
59 raise ImportError(
60 "Could not import datasets python package. "
61 "Please install it with `pip install datasets`."
62 )
64 dataset = load_dataset(
65 path=self.path,
66 name=self.name,
(...)
73 num_proc=self.num_proc,
74 )
---> 76 yield from (
77 Document(
78 page_content=row.pop(self.page_content_column),
79 metadata=row,
80 )
81 for key in dataset.keys()
82 for row in dataset[key]
83 )
File ~/Documents/GitHub/comma-chameleons/env/lib/python3.11/site-packages/langchain/document_loaders/hugging_face_dataset.py:77, in <genexpr>(.0)
59 raise ImportError(
60 "Could not import datasets python package. "
61 "Please install it with `pip install datasets`."
62 )
64 dataset = load_dataset(
65 path=self.path,
66 name=self.name,
(...)
73 num_proc=self.num_proc,
74 )
76 yield from (
---> 77 Document(
78 page_content=row.pop(self.page_content_column),
79 metadata=row,
80 )
81 for key in dataset.keys()
82 for row in dataset[key]
83 )
File ~/Documents/GitHub/comma-chameleons/env/lib/python3.11/site-packages/langchain/load/serializable.py:75, in Serializable.__init__(self, **kwargs)
74 def __init__(self, **kwargs: Any) -> None:
---> 75 super().__init__(**kwargs)
76 self._lc_kwargs = kwargs
File ~/Documents/GitHub/comma-chameleons/env/lib/python3.11/site-packages/pydantic/main.py:341, in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for Document
page_content
str type expected (type=type_error.str)
### Expected behavior
Either extend the class to handle more types of data or update the docs. Willing to do a PR to extend it if you're open. | https://github.com/langchain-ai/langchain/issues/10674 | https://github.com/langchain-ai/langchain/pull/13864 | 981f78f920d4e0514b7e627d9f3266afcccc9859 | 750485eaa8d6ca364f00454df4602abe2a8c9ba1 | "2023-09-16T10:49:37Z" | python | "2023-11-29T03:33:16Z" | libs/langchain/langchain/document_loaders/hugging_face_dataset.py | from typing import Iterator, List, Mapping, Optional, Sequence, Union
from langchain_core.documents import Document
from langchain.document_loaders.base import BaseLoader
class HuggingFaceDatasetLoader(BaseLoader):
"""Load from `Hugging Face Hub` datasets."""
def __init__(
self,
path: str,
page_content_column: str = "text",
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
] = None,
cache_dir: Optional[str] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
use_auth_token: Optional[Union[bool, str]] = None,
num_proc: Optional[int] = None,
):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
Note: Currently the function assumes the content is a string.
If it is not download the dataset using huggingface library and convert
using the json or pandas loaders.
https://github.com/langchain-ai/langchain/issues/10674
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily."""
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"Could not import datasets python package. "
"Please install it with `pip install datasets`."
)
dataset = load_dataset(
path=self.path,
name=self.name,
data_dir=self.data_dir,
data_files=self.data_files,
cache_dir=self.cache_dir,
keep_in_memory=self.keep_in_memory,
save_infos=self.save_infos,
use_auth_token=self.use_auth_token,
num_proc=self.num_proc,
)
yield from (
Document(
page_content=row.pop(self.page_content_column),
metadata=row,
)
for key in dataset.keys()
for row in dataset[key]
)
def load(self) -> List[Document]:
"""Load documents."""
return list(self.lazy_load())
|
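A workaround sketch for issue #10674 above, following the advice in the loader's docstring — load the dataset with the `datasets` library and serialise the non-string `context` column before building `Document` objects (column and config names are taken from the issue's reproduction):

```python
import json

from datasets import load_dataset
from langchain.schema import Document

dataset = load_dataset("hotpot_qa", "fullwiki", split="validation")

docs = [
    Document(
        # `context` is a nested structure in hotpot_qa, so it has to be
        # serialised to a string before it can be used as page content.
        page_content=json.dumps(row["context"]),
        metadata={k: v for k, v in row.items() if k != "context"},
    )
    for row in dataset
]
```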
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 7,034 | Loading online PDFs gives temporary file path as source in metadata | Hi,
first up, thank you for making langchain! I was playing around a little and found a minor issue with loading online PDFs, and would like to start contributing to langchain maybe by fixing this.
### System Info
langchain 0.0.220, google collab, python 3.10
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [X] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```python
from langchain.document_loaders import PyMuPDFLoader
loader = PyMuPDFLoader('https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf')
pages = loader.load()
pages[0].metadata
```
<img width="977" alt="image" src="https://github.com/hwchase17/langchain/assets/21276922/4ededc60-bb03-4502-a8c8-3c221ab109c4">
### Expected behavior
Instead of giving the temporary file path, which is not useful and deleted shortly after, it could be more helpful if the source is set to be the URL passed to it. This would require some fixes in the `langchain/document_loaders/pdf.py` file. | https://github.com/langchain-ai/langchain/issues/7034 | https://github.com/langchain-ai/langchain/pull/13274 | 6f64cb5078bb71007d25fff847541fd8f7713c0c | 9bd6e9df365e966938979511237c035a02fb4fa9 | "2023-07-01T23:24:53Z" | python | "2023-11-29T20:07:46Z" | libs/langchain/langchain/document_loaders/parsers/pdf.py | """Module contains common parsers for PDFs."""
from __future__ import annotations
import warnings
from typing import (
TYPE_CHECKING,
Any,
Iterable,
Iterator,
Mapping,
Optional,
Sequence,
Union,
)
from urllib.parse import urlparse
import numpy as np
from langchain_core.documents import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
if TYPE_CHECKING:
import fitz.fitz
import pdfminer.layout
import pdfplumber.page
import pypdf._page
import pypdfium2._helpers.page
_PDF_FILTER_WITH_LOSS = ["DCTDecode", "DCT", "JPXDecode"]
_PDF_FILTER_WITHOUT_LOSS = [
"LZWDecode",
"LZW",
"FlateDecode",
"Fl",
"ASCII85Decode",
"A85",
"ASCIIHexDecode",
"AHx",
"RunLengthDecode",
"RL",
"CCITTFaxDecode",
"CCF",
"JBIG2Decode",
]
def extract_from_images_with_rapidocr(
images: Sequence[Union[Iterable[np.ndarray], bytes]]
) -> str:
"""Extract text from images with RapidOCR.
Args:
images: Images to extract text from.
Returns:
Text extracted from images.
Raises:
ImportError: If `rapidocr-onnxruntime` package is not installed.
"""
try:
from rapidocr_onnxruntime import RapidOCR
except ImportError:
raise ImportError(
"`rapidocr-onnxruntime` package not found, please install it with "
"`pip install rapidocr-onnxruntime`"
)
ocr = RapidOCR()
text = ""
for img in images:
result, _ = ocr(img)
if result:
result = [text[1] for text in result]
text += "\n".join(result)
return text
class PyPDFParser(BaseBlobParser):
"""Load `PDF` using `pypdf`"""
def __init__(
self, password: Optional[Union[str, bytes]] = None, extract_images: bool = False
):
self.password = password
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pypdf
with blob.as_bytes_io() as pdf_file_obj:
pdf_reader = pypdf.PdfReader(pdf_file_obj, password=self.password)
yield from [
Document(
page_content=page.extract_text()
+ self._extract_images_from_page(page),
metadata={"source": blob.source, "page": page_number},
)
for page_number, page in enumerate(pdf_reader.pages)
]
def _extract_images_from_page(self, page: pypdf._page.PageObject) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images or "/XObject" not in page["/Resources"].keys():
return ""
xObject = page["/Resources"]["/XObject"].get_object() # type: ignore
images = []
for obj in xObject:
if xObject[obj]["/Subtype"] == "/Image":
if xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITHOUT_LOSS:
height, width = xObject[obj]["/Height"], xObject[obj]["/Width"]
images.append(
np.frombuffer(xObject[obj].get_data(), dtype=np.uint8).reshape(
height, width, -1
)
)
elif xObject[obj]["/Filter"][1:] in _PDF_FILTER_WITH_LOSS:
images.append(xObject[obj].get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class PDFMinerParser(BaseBlobParser):
"""Parse `PDF` using `PDFMiner`."""
def __init__(self, extract_images: bool = False, *, concatenate_pages: bool = True):
"""Initialize a parser based on PDFMiner.
Args:
extract_images: Whether to extract images from PDF.
concatenate_pages: If True, concatenate all PDF pages into one a single
document. Otherwise, return one document per page.
"""
self.extract_images = extract_images
self.concatenate_pages = concatenate_pages
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
if not self.extract_images:
from pdfminer.high_level import extract_text
with blob.as_bytes_io() as pdf_file_obj:
if self.concatenate_pages:
text = extract_text(pdf_file_obj)
metadata = {"source": blob.source}
yield Document(page_content=text, metadata=metadata)
else:
from pdfminer.pdfpage import PDFPage
pages = PDFPage.get_pages(pdf_file_obj)
for i, _ in enumerate(pages):
text = extract_text(pdf_file_obj, page_numbers=[i])
metadata = {"source": blob.source, "page": str(i)}
yield Document(page_content=text, metadata=metadata)
else:
import io
from pdfminer.converter import PDFPageAggregator, TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFPageInterpreter, PDFResourceManager
from pdfminer.pdfpage import PDFPage
text_io = io.StringIO()
with blob.as_bytes_io() as pdf_file_obj:
pages = PDFPage.get_pages(pdf_file_obj)
rsrcmgr = PDFResourceManager()
device_for_text = TextConverter(rsrcmgr, text_io, laparams=LAParams())
device_for_image = PDFPageAggregator(rsrcmgr, laparams=LAParams())
interpreter_for_text = PDFPageInterpreter(rsrcmgr, device_for_text)
interpreter_for_image = PDFPageInterpreter(rsrcmgr, device_for_image)
for i, page in enumerate(pages):
interpreter_for_text.process_page(page)
interpreter_for_image.process_page(page)
content = text_io.getvalue() + self._extract_images_from_page(
device_for_image.get_result()
)
text_io.truncate(0)
text_io.seek(0)
metadata = {"source": blob.source, "page": str(i)}
yield Document(page_content=content, metadata=metadata)
def _extract_images_from_page(self, page: pdfminer.layout.LTPage) -> str:
"""Extract images from page and get the text with RapidOCR."""
import pdfminer
def get_image(layout_object: Any) -> Any:
if isinstance(layout_object, pdfminer.layout.LTImage):
return layout_object
if isinstance(layout_object, pdfminer.layout.LTContainer):
for child in layout_object:
return get_image(child)
else:
return None
images = []
for img in list(filter(bool, map(get_image, page))):
if img.stream["Filter"].name in _PDF_FILTER_WITHOUT_LOSS:
images.append(
np.frombuffer(img.stream.get_data(), dtype=np.uint8).reshape(
img.stream["Height"], img.stream["Width"], -1
)
)
elif img.stream["Filter"].name in _PDF_FILTER_WITH_LOSS:
images.append(img.stream.get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class PyMuPDFParser(BaseBlobParser):
"""Parse `PDF` using `PyMuPDF`."""
def __init__(
self,
text_kwargs: Optional[Mapping[str, Any]] = None,
extract_images: bool = False,
) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``fitz.Page.get_text()``.
"""
self.text_kwargs = text_kwargs or {}
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import fitz
with blob.as_bytes_io() as file_path:
doc = fitz.open(file_path) # open document
yield from [
Document(
page_content=page.get_text(**self.text_kwargs)
+ self._extract_images_from_page(doc, page),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.number,
"total_pages": len(doc),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc
]
def _extract_images_from_page(
self, doc: fitz.fitz.Document, page: fitz.fitz.Page
) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
import fitz
img_list = page.get_images()
imgs = []
for img in img_list:
xref = img[0]
pix = fitz.Pixmap(doc, xref)
imgs.append(
np.frombuffer(pix.samples, dtype=np.uint8).reshape(
pix.height, pix.width, -1
)
)
return extract_from_images_with_rapidocr(imgs)
class PyPDFium2Parser(BaseBlobParser):
"""Parse `PDF` with `PyPDFium2`."""
def __init__(self, extract_images: bool = False) -> None:
"""Initialize the parser."""
try:
import pypdfium2 # noqa:F401
except ImportError:
raise ImportError(
"pypdfium2 package not found, please install it with"
" `pip install pypdfium2`"
)
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pypdfium2
# pypdfium2 is really finicky with respect to closing things,
# if done incorrectly creates seg faults.
with blob.as_bytes_io() as file_path:
pdf_reader = pypdfium2.PdfDocument(file_path, autoclose=True)
try:
for page_number, page in enumerate(pdf_reader):
text_page = page.get_textpage()
content = text_page.get_text_range()
text_page.close()
content += "\n" + self._extract_images_from_page(page)
page.close()
metadata = {"source": blob.source, "page": page_number}
yield Document(page_content=content, metadata=metadata)
finally:
pdf_reader.close()
def _extract_images_from_page(self, page: pypdfium2._helpers.page.PdfPage) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
import pypdfium2.raw as pdfium_c
images = list(page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,)))
images = list(map(lambda x: x.get_bitmap().to_numpy(), images))
return extract_from_images_with_rapidocr(images)
class PDFPlumberParser(BaseBlobParser):
"""Parse `PDF` with `PDFPlumber`."""
def __init__(
self,
text_kwargs: Optional[Mapping[str, Any]] = None,
dedupe: bool = False,
extract_images: bool = False,
) -> None:
"""Initialize the parser.
Args:
text_kwargs: Keyword arguments to pass to ``pdfplumber.Page.extract_text()``
dedupe: Avoiding the error of duplicate characters if `dedupe=True`.
"""
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import pdfplumber
with blob.as_bytes_io() as file_path:
doc = pdfplumber.open(file_path) # open document
yield from [
Document(
page_content=self._process_page_content(page)
+ "\n"
+ self._extract_images_from_page(page),
metadata=dict(
{
"source": blob.source,
"file_path": blob.source,
"page": page.page_number - 1,
"total_pages": len(doc.pages),
},
**{
k: doc.metadata[k]
for k in doc.metadata
if type(doc.metadata[k]) in [str, int]
},
),
)
for page in doc.pages
]
def _process_page_content(self, page: pdfplumber.page.Page) -> str:
"""Process the page content based on dedupe."""
if self.dedupe:
return page.dedupe_chars().extract_text(**self.text_kwargs)
return page.extract_text(**self.text_kwargs)
def _extract_images_from_page(self, page: pdfplumber.page.Page) -> str:
"""Extract images from page and get the text with RapidOCR."""
if not self.extract_images:
return ""
images = []
for img in page.images:
if img["stream"]["Filter"].name in _PDF_FILTER_WITHOUT_LOSS:
images.append(
np.frombuffer(img["stream"].get_data(), dtype=np.uint8).reshape(
img["stream"]["Height"], img["stream"]["Width"], -1
)
)
elif img["stream"]["Filter"].name in _PDF_FILTER_WITH_LOSS:
images.append(img["stream"].get_data())
else:
warnings.warn("Unknown PDF Filter!")
return extract_from_images_with_rapidocr(images)
class AmazonTextractPDFParser(BaseBlobParser):
"""Send `PDF` files to `Amazon Textract` and parse them.
For parsing multi-page PDFs, they have to reside on S3.
The AmazonTextractPDFLoader calls the
[Amazon Textract Service](https://aws.amazon.com/textract/)
to convert PDFs into a Document structure.
Single and multi-page documents are supported with up to 3000 pages
and 512 MB of size.
For the call to be successful an AWS account is required,
similar to the
[AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
requirements.
Besides the AWS configuration, it is very similar to the other PDF
loaders, while also supporting JPEG, PNG and TIFF and non-native
PDF formats.
```python
from langchain.document_loaders import AmazonTextractPDFLoader
loader=AmazonTextractPDFLoader("example_data/alejandro_rosalez_sample-small.jpeg")
documents = loader.load()
```
One feature is the linearization of the output.
When using the features LAYOUT, FORMS or TABLES together with Textract
```python
from langchain.document_loaders import AmazonTextractPDFLoader
# you can mix and match each of the features
loader=AmazonTextractPDFLoader(
"example_data/alejandro_rosalez_sample-small.jpeg",
textract_features=["TABLES", "LAYOUT"])
documents = loader.load()
```
it will generate output that formats the text in reading order and
try to output the information in a tabular structure or
output the key/value pairs with a colon (key: value).
This helps most LLMs to achieve better accuracy when
processing these texts.
"""
def __init__(
self,
textract_features: Optional[Sequence[int]] = None,
client: Optional[Any] = None,
) -> None:
"""Initializes the parser.
Args:
textract_features: Features to be used for extraction, each feature
should be passed as an int that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client
"""
try:
import textractcaller as tc
import textractor.entities.document as textractor
self.tc = tc
self.textractor = textractor
if textract_features is not None:
self.textract_features = [
tc.Textract_Features(f) for f in textract_features
]
else:
self.textract_features = []
except ImportError:
raise ImportError(
"Could not import amazon-textract-caller or "
"amazon-textract-textractor python package. Please install it "
"with `pip install amazon-textract-caller` & "
"`pip install amazon-textract-textractor`."
)
if not client:
try:
import boto3
self.boto3_textract_client = boto3.client("textract")
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
else:
self.boto3_textract_client = client
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Iterates over the Blob pages and returns an Iterator with a Document
for each page, like the other parsers If multi-page document, blob.path
has to be set to the S3 URI and for single page docs
the blob.data is taken
"""
url_parse_result = urlparse(str(blob.path)) if blob.path else None
# Either call with S3 path (multi-page) or with bytes (single-page)
if (
url_parse_result
and url_parse_result.scheme == "s3"
and url_parse_result.netloc
):
textract_response_json = self.tc.call_textract(
input_document=str(blob.path),
features=self.textract_features,
boto3_textract_client=self.boto3_textract_client,
)
else:
textract_response_json = self.tc.call_textract(
input_document=blob.as_bytes(),
features=self.textract_features,
call_mode=self.tc.Textract_Call_Mode.FORCE_SYNC,
boto3_textract_client=self.boto3_textract_client,
)
document = self.textractor.Document.open(textract_response_json)
linearizer_config = self.textractor.TextLinearizationConfig(
hide_figure_layout=True,
title_prefix="# ",
section_header_prefix="## ",
list_element_prefix="*",
)
for idx, page in enumerate(document.pages):
yield Document(
page_content=page.get_text(config=linearizer_config),
metadata={"source": blob.source, "page": idx + 1},
)
class DocumentIntelligenceParser(BaseBlobParser):
"""Loads a PDF with Azure Document Intelligence
(formerly Forms Recognizer) and chunks at character level."""
def __init__(self, client: Any, model: str):
self.client = client
self.model = model
def _generate_docs(self, blob: Blob, result: Any) -> Iterator[Document]:
for p in result.pages:
content = " ".join([line.content for line in p.lines])
d = Document(
page_content=content,
metadata={
"source": blob.source,
"page": p.page_number,
},
)
yield d
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
with blob.as_bytes_io() as file_obj:
poller = self.client.begin_analyze_document(self.model, file_obj)
result = poller.result()
docs = self._generate_docs(blob, result)
yield from docs
|
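To illustrate issue #7034 above: until the loaders propagate the original URL themselves, the `source` metadata can be patched after loading. This is only a sketch, using the URL from the issue's reproduction:

```python
from langchain.document_loaders import PyMuPDFLoader

url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
loader = PyMuPDFLoader(url)
docs = loader.load()

# Without a fix, metadata["source"] points at a temporary file that is removed
# once the loader is garbage-collected; overwrite it with the stable URL.
for doc in docs:
    doc.metadata["source"] = url
    doc.metadata["file_path"] = url
```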
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 7,034 | Loading online PDFs gives temporary file path as source in metadata | Hi,
first up, thank you for making langchain! I was playing around a little and found a minor issue with loading online PDFs, and would like to start contributing to langchain maybe by fixing this.
### System Info
langchain 0.0.220, google collab, python 3.10
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [X] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```python
from langchain.document_loaders import PyMuPDFLoader
loader = PyMuPDFLoader('https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf')
pages = loader.load()
pages[0].metadata
```
<img width="977" alt="image" src="https://github.com/hwchase17/langchain/assets/21276922/4ededc60-bb03-4502-a8c8-3c221ab109c4">
### Expected behavior
Instead of giving the temporary file path, which is not useful and deleted shortly after, it could be more helpful if the source is set to be the URL passed to it. This would require some fixes in the `langchain/document_loaders/pdf.py` file. | https://github.com/langchain-ai/langchain/issues/7034 | https://github.com/langchain-ai/langchain/pull/13274 | 6f64cb5078bb71007d25fff847541fd8f7713c0c | 9bd6e9df365e966938979511237c035a02fb4fa9 | "2023-07-01T23:24:53Z" | python | "2023-11-29T20:07:46Z" | libs/langchain/langchain/document_loaders/pdf.py | import json
import logging
import os
import tempfile
import time
from abc import ABC
from io import StringIO
from pathlib import Path
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Union
from urllib.parse import urlparse
import requests
from langchain_core.documents import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.blob_loaders import Blob
from langchain.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
)
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__file__)
class UnstructuredPDFLoader(UnstructuredFileLoader):
"""Load `PDF` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Examples
--------
from langchain.document_loaders import UnstructuredPDFLoader
loader = UnstructuredPDFLoader(
"example.pdf", mode="elements", strategy="fast",
)
docs = loader.load()
References
----------
https://unstructured-io.github.io/unstructured/bricks.html#partition-pdf
"""
def _get_elements(self) -> List:
from unstructured.partition.pdf import partition_pdf
return partition_pdf(filename=self.file_path, **self.unstructured_kwargs)
class BasePDFLoader(BaseLoader, ABC):
"""Base Loader class for `PDF` files.
If the file is a web path, it will download it to a temporary file, use it, then
clean up the temporary file after completion.
"""
def __init__(self, file_path: str, *, headers: Optional[Dict] = None):
"""Initialize with a file path.
Args:
file_path: Either a local, S3 or web path to a PDF file.
headers: Headers to use for GET request to download a file from a web path.
"""
self.file_path = file_path
self.web_path = None
self.headers = headers
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path or S3, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
self.temp_dir = tempfile.TemporaryDirectory()
_, suffix = os.path.splitext(self.file_path)
temp_pdf = os.path.join(self.temp_dir.name, f"tmp{suffix}")
self.web_path = self.file_path
if not self._is_s3_url(self.file_path):
r = requests.get(self.file_path, headers=self.headers)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
with open(temp_pdf, mode="wb") as f:
f.write(r.content)
self.file_path = str(temp_pdf)
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
def __del__(self) -> None:
if hasattr(self, "temp_dir"):
self.temp_dir.cleanup()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
@staticmethod
def _is_s3_url(url: str) -> bool:
"""check if the url is S3"""
try:
result = urlparse(url)
if result.scheme == "s3" and result.netloc:
return True
return False
except ValueError:
return False
@property
def source(self) -> str:
return self.web_path if self.web_path is not None else self.file_path
class OnlinePDFLoader(BasePDFLoader):
"""Load online `PDF`."""
def load(self) -> List[Document]:
"""Load documents."""
loader = UnstructuredPDFLoader(str(self.file_path))
return loader.load()
class PyPDFLoader(BasePDFLoader):
"""Load PDF using pypdf into list of documents.
Loader chunks by page and stores page numbers in metadata.
"""
def __init__(
self,
file_path: str,
password: Optional[Union[str, bytes]] = None,
headers: Optional[Dict] = None,
extract_images: bool = False,
) -> None:
"""Initialize with a file path."""
try:
import pypdf # noqa:F401
except ImportError:
raise ImportError(
"pypdf package not found, please install it with " "`pip install pypdf`"
)
super().__init__(file_path, headers=headers)
self.parser = PyPDFParser(password=password, extract_images=extract_images)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
if self.web_path:
blob = Blob.from_data(open(self.file_path, "rb").read(), path=self.web_path)
else:
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
class PyPDFium2Loader(BasePDFLoader):
"""Load `PDF` using `pypdfium2` and chunks at character level."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
):
"""Initialize with a file path."""
super().__init__(file_path, headers=headers)
self.parser = PyPDFium2Parser(extract_images=extract_images)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
class PyPDFDirectoryLoader(BaseLoader):
"""Load a directory with `PDF` files using `pypdf` and chunks at character level.
Loader also stores page numbers in metadata.
"""
def __init__(
self,
path: str,
glob: str = "**/[!.]*.pdf",
silent_errors: bool = False,
load_hidden: bool = False,
recursive: bool = False,
extract_images: bool = False,
):
self.path = path
self.glob = glob
self.load_hidden = load_hidden
self.recursive = recursive
self.silent_errors = silent_errors
self.extract_images = extract_images
@staticmethod
def _is_visible(path: Path) -> bool:
return not any(part.startswith(".") for part in path.parts)
def load(self) -> List[Document]:
p = Path(self.path)
docs = []
items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
for i in items:
if i.is_file():
if self._is_visible(i.relative_to(p)) or self.load_hidden:
try:
loader = PyPDFLoader(str(i), extract_images=self.extract_images)
sub_docs = loader.load()
for doc in sub_docs:
doc.metadata["source"] = str(i)
docs.extend(sub_docs)
except Exception as e:
if self.silent_errors:
logger.warning(e)
else:
raise e
return docs
class PDFMinerLoader(BasePDFLoader):
"""Load `PDF` files using `PDFMiner`."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
concatenate_pages: bool = True,
) -> None:
"""Initialize with file path.
Args:
extract_images: Whether to extract images from PDF.
            concatenate_pages: If True, concatenate all PDF pages into a single
                document. Otherwise, return one document per page.
"""
try:
from pdfminer.high_level import extract_text # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path, headers=headers)
self.parser = PDFMinerParser(
extract_images=extract_images, concatenate_pages=concatenate_pages
)
def load(self) -> List[Document]:
"""Eagerly load the content."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazily load documents."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
class PDFMinerPDFasHTMLLoader(BasePDFLoader):
"""Load `PDF` files as HTML content using `PDFMiner`."""
def __init__(self, file_path: str, *, headers: Optional[Dict] = None):
"""Initialize with a file path."""
try:
from pdfminer.high_level import extract_text_to_fp # noqa:F401
except ImportError:
raise ImportError(
"`pdfminer` package not found, please install it with "
"`pip install pdfminer.six`"
)
super().__init__(file_path, headers=headers)
def load(self) -> List[Document]:
"""Load file."""
from pdfminer.high_level import extract_text_to_fp
from pdfminer.layout import LAParams
from pdfminer.utils import open_filename
output_string = StringIO()
with open_filename(self.file_path, "rb") as fp:
extract_text_to_fp(
fp, # type: ignore[arg-type]
output_string,
codec="",
laparams=LAParams(),
output_type="html",
)
metadata = {"source": self.file_path}
return [Document(page_content=output_string.getvalue(), metadata=metadata)]
class PyMuPDFLoader(BasePDFLoader):
"""Load `PDF` files using `PyMuPDF`."""
def __init__(
self,
file_path: str,
*,
headers: Optional[Dict] = None,
extract_images: bool = False,
**kwargs: Any,
) -> None:
"""Initialize with a file path."""
try:
import fitz # noqa:F401
except ImportError:
raise ImportError(
"`PyMuPDF` package not found, please install it with "
"`pip install pymupdf`"
)
super().__init__(file_path, headers=headers)
self.extract_images = extract_images
self.text_kwargs = kwargs
def load(self, **kwargs: Any) -> List[Document]:
"""Load file."""
if kwargs:
logger.warning(
f"Received runtime arguments {kwargs}. Passing runtime args to `load`"
f" is deprecated. Please pass arguments during initialization instead."
)
text_kwargs = {**self.text_kwargs, **kwargs}
parser = PyMuPDFParser(
text_kwargs=text_kwargs, extract_images=self.extract_images
)
blob = Blob.from_path(self.file_path)
return parser.parse(blob)
# MathpixPDFLoader implementation taken largely from Daniel Gross's:
# https://gist.github.com/danielgross/3ab4104e14faccc12b49200843adab21
class MathpixPDFLoader(BasePDFLoader):
"""Load `PDF` files using `Mathpix` service."""
def __init__(
self,
file_path: str,
processed_file_format: str = "md",
max_wait_time_seconds: int = 500,
should_clean_pdf: bool = False,
extra_request_data: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Initialize with a file path.
Args:
file_path: a file for loading.
processed_file_format: a format of the processed file. Default is "md".
max_wait_time_seconds: a maximum time to wait for the response from
the server. Default is 500.
should_clean_pdf: a flag to clean the PDF file. Default is False.
extra_request_data: Additional request data.
**kwargs: additional keyword arguments.
"""
self.mathpix_api_key = get_from_dict_or_env(
kwargs, "mathpix_api_key", "MATHPIX_API_KEY"
)
self.mathpix_api_id = get_from_dict_or_env(
kwargs, "mathpix_api_id", "MATHPIX_API_ID"
)
super().__init__(file_path, **kwargs)
self.processed_file_format = processed_file_format
self.extra_request_data = (
extra_request_data if extra_request_data is not None else {}
)
self.max_wait_time_seconds = max_wait_time_seconds
self.should_clean_pdf = should_clean_pdf
@property
def _mathpix_headers(self) -> Dict[str, str]:
return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key}
@property
def url(self) -> str:
return "https://api.mathpix.com/v3/pdf"
@property
def data(self) -> dict:
options = {
"conversion_formats": {self.processed_file_format: True},
**self.extra_request_data,
}
return {"options_json": json.dumps(options)}
def send_pdf(self) -> str:
with open(self.file_path, "rb") as f:
files = {"file": f}
response = requests.post(
self.url, headers=self._mathpix_headers, files=files, data=self.data
)
response_data = response.json()
if "pdf_id" in response_data:
pdf_id = response_data["pdf_id"]
return pdf_id
else:
raise ValueError("Unable to send PDF to Mathpix.")
def wait_for_processing(self, pdf_id: str) -> None:
"""Wait for processing to complete.
Args:
pdf_id: a PDF id.
Returns: None
"""
url = self.url + "/" + pdf_id
for _ in range(0, self.max_wait_time_seconds, 5):
response = requests.get(url, headers=self.headers)
response_data = response.json()
status = response_data.get("status", None)
if status == "completed":
return
elif status == "error":
raise ValueError("Unable to retrieve PDF from Mathpix")
else:
print(f"Status: {status}, waiting for processing to complete")
time.sleep(5)
raise TimeoutError
def get_processed_pdf(self, pdf_id: str) -> str:
self.wait_for_processing(pdf_id)
url = f"{self.url}/{pdf_id}.{self.processed_file_format}"
response = requests.get(url, headers=self.headers)
return response.content.decode("utf-8")
def clean_pdf(self, contents: str) -> str:
"""Clean the PDF file.
Args:
contents: a PDF file contents.
        Returns:
            The cleaned PDF file contents.
        """
contents = "\n".join(
[line for line in contents.split("\n") if not line.startswith("![]")]
)
# replace \section{Title} with # Title
contents = contents.replace("\\section{", "# ").replace("}", "")
# replace the "\" slash that Mathpix adds to escape $, %, (, etc.
contents = (
contents.replace(r"\$", "$")
.replace(r"\%", "%")
.replace(r"\(", "(")
.replace(r"\)", ")")
)
return contents
def load(self) -> List[Document]:
pdf_id = self.send_pdf()
contents = self.get_processed_pdf(pdf_id)
if self.should_clean_pdf:
contents = self.clean_pdf(contents)
metadata = {"source": self.source, "file_path": self.source}
return [Document(page_content=contents, metadata=metadata)]
class PDFPlumberLoader(BasePDFLoader):
"""Load `PDF` files using `pdfplumber`."""
def __init__(
self,
file_path: str,
text_kwargs: Optional[Mapping[str, Any]] = None,
dedupe: bool = False,
headers: Optional[Dict] = None,
extract_images: bool = False,
) -> None:
"""Initialize with a file path."""
try:
import pdfplumber # noqa:F401
except ImportError:
raise ImportError(
"pdfplumber package not found, please install it with "
"`pip install pdfplumber`"
)
super().__init__(file_path, headers=headers)
self.text_kwargs = text_kwargs or {}
self.dedupe = dedupe
self.extract_images = extract_images
def load(self) -> List[Document]:
"""Load file."""
parser = PDFPlumberParser(
text_kwargs=self.text_kwargs,
dedupe=self.dedupe,
extract_images=self.extract_images,
)
blob = Blob.from_path(self.file_path)
return parser.parse(blob)
class AmazonTextractPDFLoader(BasePDFLoader):
"""Load `PDF` files from a local file system, HTTP or S3.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Amazon Textract service.
Example:
.. code-block:: python
from langchain.document_loaders import AmazonTextractPDFLoader
loader = AmazonTextractPDFLoader(
file_path="s3://pdfs/myfile.pdf"
)
document = loader.load()
"""
def __init__(
self,
file_path: str,
textract_features: Optional[Sequence[str]] = None,
client: Optional[Any] = None,
credentials_profile_name: Optional[str] = None,
region_name: Optional[str] = None,
endpoint_url: Optional[str] = None,
headers: Optional[Dict] = None,
) -> None:
"""Initialize the loader.
Args:
file_path: A file, url or s3 path for input file
textract_features: Features to be used for extraction, each feature
should be passed as a str that conforms to the enum
`Textract_Features`, see `amazon-textract-caller` pkg
client: boto3 textract client (Optional)
credentials_profile_name: AWS profile name, if not default (Optional)
region_name: AWS region, eg us-east-1 (Optional)
endpoint_url: endpoint url for the textract service (Optional)
"""
super().__init__(file_path, headers=headers)
try:
import textractcaller as tc # noqa: F401
except ImportError:
raise ModuleNotFoundError(
"Could not import amazon-textract-caller python package. "
"Please install it with `pip install amazon-textract-caller`."
)
if textract_features:
features = [tc.Textract_Features[x] for x in textract_features]
else:
features = []
if credentials_profile_name or region_name or endpoint_url:
try:
import boto3
if credentials_profile_name is not None:
session = boto3.Session(profile_name=credentials_profile_name)
else:
# use default credentials
session = boto3.Session()
client_params = {}
if region_name:
client_params["region_name"] = region_name
if endpoint_url:
client_params["endpoint_url"] = endpoint_url
client = session.client("textract", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
self.parser = AmazonTextractPDFParser(textract_features=features, client=client)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load documents"""
# the self.file_path is local, but the blob has to include
# the S3 location if the file originated from S3 for multi-page documents
# raises ValueError when multi-page and not on S3"""
if self.web_path and self._is_s3_url(self.web_path):
blob = Blob(path=self.web_path)
else:
blob = Blob.from_path(self.file_path)
if AmazonTextractPDFLoader._get_number_of_pages(blob) > 1:
raise ValueError(
f"the file {blob.path} is a multi-page document, \
but not stored on S3. \
Textract requires multi-page documents to be on S3."
)
yield from self.parser.parse(blob)
@staticmethod
def _get_number_of_pages(blob: Blob) -> int:
try:
import pypdf
from PIL import Image, ImageSequence
except ImportError:
raise ModuleNotFoundError(
"Could not import pypdf or Pilloe python package. "
"Please install it with `pip install pypdf Pillow`."
)
if blob.mimetype == "application/pdf":
with blob.as_bytes_io() as input_pdf_file:
pdf_reader = pypdf.PdfReader(input_pdf_file)
return len(pdf_reader.pages)
elif blob.mimetype == "image/tiff":
num_pages = 0
img = Image.open(blob.as_bytes())
for _, _ in enumerate(ImageSequence.Iterator(img)):
num_pages += 1
return num_pages
elif blob.mimetype in ["image/png", "image/jpeg"]:
return 1
else:
raise ValueError(f"unsupported mime type: {blob.mimetype}")
class DocumentIntelligenceLoader(BasePDFLoader):
"""Loads a PDF with Azure Document Intelligence"""
def __init__(
self,
file_path: str,
client: Any,
model: str = "prebuilt-document",
headers: Optional[Dict] = None,
) -> None:
"""
Initialize the object for file processing with Azure Document Intelligence
(formerly Form Recognizer).
This constructor initializes a DocumentIntelligenceParser object to be used
for parsing files using the Azure Document Intelligence API. The load method
generates a Document node including metadata (source blob and page number)
for each page.
Parameters:
-----------
file_path : str
The path to the file that needs to be parsed.
client: Any
A DocumentAnalysisClient to perform the analysis of the blob
model : str
The model name or ID to be used for form recognition in Azure.
Examples:
---------
>>> obj = DocumentIntelligenceLoader(
... file_path="path/to/file",
... client=client,
... model="prebuilt-document"
... )
"""
self.parser = DocumentIntelligenceParser(client=client, model=model)
super().__init__(file_path, headers=headers)
def load(self) -> List[Document]:
"""Load given path as pages."""
return list(self.lazy_load())
def lazy_load(
self,
) -> Iterator[Document]:
"""Lazy load given path as pages."""
blob = Blob.from_path(self.file_path)
yield from self.parser.parse(blob)
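# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): lazily streaming
# pages from a local PDF with the PyPDFLoader defined above. The file path is a
# hypothetical example.
#
#   from langchain.document_loaders import PyPDFLoader
#
#   loader = PyPDFLoader("example_data/sample.pdf")
#   for page in loader.lazy_load():
#       print(page.metadata["page"], page.page_content[:80])
# ---------------------------------------------------------------------------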
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary[/](https://www.dictionaryapi.com/)), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
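For illustration, a rough sketch of how such a dictionary wrapper could be shaped, following the pattern of LangChain's existing API wrappers. The endpoint URL, response fields, and class name below are assumptions based on the public Merriam-Webster documentation, not the eventual PR:

```python
import requests

COLLEGIATE_API_URL = (
    "https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}"
)


class MerriamWebsterAPIWrapper:
    """Minimal sketch of a Merriam-Webster Collegiate Dictionary wrapper."""

    def __init__(self, api_key: str, top_k_results: int = 3) -> None:
        self.api_key = api_key
        self.top_k_results = top_k_results

    def run(self, word: str) -> str:
        response = requests.get(
            COLLEGIATE_API_URL.format(word=word), params={"key": self.api_key}
        )
        response.raise_for_status()
        entries = response.json()
        definitions = []
        for entry in entries[: self.top_k_results]:
            # Full entries are dicts with a "shortdef" list; unknown words come
            # back as plain suggestion strings, which are skipped here.
            if isinstance(entry, dict):
                definitions.extend(entry.get("shortdef", []))
        if not definitions:
            return f"No definitions found for '{word}'."
        return "\n".join(f"{i + 1}. {d}" for i, d in enumerate(definitions))
```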
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/agents/load_tools.py | # flake8: noqa
"""Tools provide access to various resources and services.
LangChain has a large ecosystem of integrations with various external resources
like local and remote file systems, APIs and databases.
These integrations allow developers to create versatile applications that combine the
power of LLMs with the ability to access, interact with and manipulate external
resources.
When developing an application, developers should inspect the capabilities and
permissions of the tools that underlie the given agent toolkit, and determine
whether permissions of the given toolkit are appropriate for the application.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
import warnings
from typing import Any, Dict, List, Optional, Callable, Tuple
from mypy_extensions import Arg, KwArg
from langchain.agents.tools import Tool
from langchain_core.language_models import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import Callbacks
from langchain.chains.api import news_docs, open_meteo_docs, podcast_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.utilities.dalle_image_generator import DallEAPIWrapper
from langchain.utilities.requests import TextRequestsWrapper
from langchain.tools.arxiv.tool import ArxivQueryRun
from langchain.tools.golden_query.tool import GoldenQueryRun
from langchain.tools.pubmed.tool import PubmedQueryRun
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.tools.google_cloud.texttospeech import GoogleCloudTextToSpeechTool
from langchain.tools.google_lens.tool import GoogleLensQueryRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.google_scholar.tool import GoogleScholarQueryRun
from langchain.tools.google_finance.tool import GoogleFinanceQueryRun
from langchain.tools.google_trends.tool import GoogleTrendsQueryRun
from langchain.tools.metaphor_search.tool import MetaphorSearchResults
from langchain.tools.google_jobs.tool import GoogleJobsQueryRun
from langchain.tools.google_serper.tool import GoogleSerperResults, GoogleSerperRun
from langchain.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
from langchain.tools.graphql.tool import BaseGraphQLTool
from langchain.tools.human.tool import HumanInputRun
from langchain.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
from langchain.tools.scenexplain.tool import SceneXplainTool
from langchain.tools.searx_search.tool import SearxSearchResults, SearxSearchRun
from langchain.tools.shell.tool import ShellTool
from langchain.tools.sleep.tool import SleepTool
from langchain.tools.stackexchange.tool import StackExchangeTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchRun
from langchain.tools.dataforseo_api_search import DataForSeoAPISearchResults
from langchain.tools.memorize.tool import Memorize
from langchain.tools.reddit_search.tool import RedditSearchRun
from langchain.utilities.arxiv import ArxivAPIWrapper
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
from langchain.utilities.pubmed import PubMedAPIWrapper
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
from langchain.utilities.google_lens import GoogleLensAPIWrapper
from langchain.utilities.google_jobs import GoogleJobsAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
from langchain.utilities.awslambda import LambdaWrapper
from langchain.utilities.graphql import GraphQLAPIWrapper
from langchain.utilities.searchapi import SearchApiAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.stackexchange import StackExchangeAPIWrapper
from langchain.utilities.twilio import TwilioAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
from langchain.utilities.dataforseo_api_search import DataForSeoAPIWrapper
from langchain.utilities.reddit_search import RedditSearchAPIWrapper
def _get_python_repl() -> BaseTool:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _get_tools_requests_get() -> BaseTool:
return RequestsGetTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_post() -> BaseTool:
return RequestsPostTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_patch() -> BaseTool:
return RequestsPatchTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_put() -> BaseTool:
return RequestsPutTool(requests_wrapper=TextRequestsWrapper())
def _get_tools_requests_delete() -> BaseTool:
return RequestsDeleteTool(requests_wrapper=TextRequestsWrapper())
def _get_terminal() -> BaseTool:
return ShellTool()
def _get_sleep() -> BaseTool:
return SleepTool()
_BASE_TOOLS: Dict[str, Callable[[], BaseTool]] = {
"requests": _get_tools_requests_get, # preserved for backwards compatibility
"requests_get": _get_tools_requests_get,
"requests_post": _get_tools_requests_post,
"requests_patch": _get_tools_requests_patch,
"requests_put": _get_tools_requests_put,
"requests_delete": _get_tools_requests_delete,
"terminal": _get_terminal,
"sleep": _get_sleep,
}
def _get_llm_math(llm: BaseLanguageModel) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain.from_llm(llm=llm).run,
coroutine=LLMMathChain.from_llm(llm=llm).arun,
)
def _get_open_meteo_api(llm: BaseLanguageModel) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(
llm,
open_meteo_docs.OPEN_METEO_DOCS,
limit_to_domains=["https://api.open-meteo.com/"],
)
return Tool(
name="Open-Meteo-API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS: Dict[str, Callable[[BaseLanguageModel], BaseTool]] = {
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
news_docs.NEWS_DOCS,
headers={"X-Api-Key": news_api_key},
limit_to_domains=["https://newsapi.org/"],
)
return Tool(
name="News-API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
limit_to_domains=["https://api.themoviedb.org/"],
)
return Tool(
name="TMDB-API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_podcast_api(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
listen_api_key = kwargs["listen_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm,
podcast_docs.PODCAST_DOCS,
headers={"X-ListenAPI-Key": listen_api_key},
limit_to_domains=["https://listen-api.listennotes.com/"],
)
return Tool(
name="Podcast-API",
description="Use the Listen Notes Podcast API to search all podcasts or episodes. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_lambda_api(**kwargs: Any) -> BaseTool:
return Tool(
name=kwargs["awslambda_tool_name"],
description=kwargs["awslambda_tool_description"],
func=LambdaWrapper(**kwargs).run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_arxiv(**kwargs: Any) -> BaseTool:
return ArxivQueryRun(api_wrapper=ArxivAPIWrapper(**kwargs))
def _get_golden_query(**kwargs: Any) -> BaseTool:
return GoldenQueryRun(api_wrapper=GoldenQueryAPIWrapper(**kwargs))
def _get_pubmed(**kwargs: Any) -> BaseTool:
return PubmedQueryRun(api_wrapper=PubMedAPIWrapper(**kwargs))
def _get_google_jobs(**kwargs: Any) -> BaseTool:
return GoogleJobsQueryRun(api_wrapper=GoogleJobsAPIWrapper(**kwargs))
def _get_google_lens(**kwargs: Any) -> BaseTool:
return GoogleLensQueryRun(api_wrapper=GoogleLensAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return GoogleSerperRun(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_scholar(**kwargs: Any) -> BaseTool:
return GoogleScholarQueryRun(api_wrapper=GoogleScholarAPIWrapper(**kwargs))
def _get_google_finance(**kwargs: Any) -> BaseTool:
return GoogleFinanceQueryRun(api_wrapper=GoogleFinanceAPIWrapper(**kwargs))
def _get_google_trends(**kwargs: Any) -> BaseTool:
return GoogleTrendsQueryRun(api_wrapper=GoogleTrendsAPIWrapper(**kwargs))
def _get_google_serper_results_json(**kwargs: Any) -> BaseTool:
return GoogleSerperResults(api_wrapper=GoogleSerperAPIWrapper(**kwargs))
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_searchapi(**kwargs: Any) -> BaseTool:
return SearchAPIRun(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_searchapi_results_json(**kwargs: Any) -> BaseTool:
return SearchAPIResults(api_wrapper=SearchApiAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_stackexchange(**kwargs: Any) -> BaseTool:
return StackExchangeTool(api_wrapper=StackExchangeAPIWrapper(**kwargs))
def _get_dalle_image_generator(**kwargs: Any) -> Tool:
return Tool(
"Dall-E-Image-Generator",
DallEAPIWrapper(**kwargs).run,
"A wrapper around OpenAI DALL-E API. Useful for when you need to generate images from a text description. Input should be an image description.",
)
def _get_twilio(**kwargs: Any) -> BaseTool:
return Tool(
name="Text-Message",
description="Useful for when you need to send a text message to a provided phone number.",
func=TwilioAPIWrapper(**kwargs).run,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return SearxSearchRun(wrapper=SearxSearchWrapper(**kwargs))
def _get_searx_search_results_json(**kwargs: Any) -> BaseTool:
wrapper_kwargs = {k: v for k, v in kwargs.items() if k != "num_results"}
return SearxSearchResults(wrapper=SearxSearchWrapper(**wrapper_kwargs), **kwargs)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
def _get_metaphor_search(**kwargs: Any) -> BaseTool:
return MetaphorSearchResults(api_wrapper=MetaphorSearchAPIWrapper(**kwargs))
def _get_ddg_search(**kwargs: Any) -> BaseTool:
return DuckDuckGoSearchRun(api_wrapper=DuckDuckGoSearchAPIWrapper(**kwargs))
def _get_human_tool(**kwargs: Any) -> BaseTool:
return HumanInputRun(**kwargs)
def _get_scenexplain(**kwargs: Any) -> BaseTool:
return SceneXplainTool(**kwargs)
def _get_graphql_tool(**kwargs: Any) -> BaseTool:
graphql_endpoint = kwargs["graphql_endpoint"]
wrapper = GraphQLAPIWrapper(graphql_endpoint=graphql_endpoint)
return BaseGraphQLTool(graphql_wrapper=wrapper)
def _get_openweathermap(**kwargs: Any) -> BaseTool:
return OpenWeatherMapQueryRun(api_wrapper=OpenWeatherMapAPIWrapper(**kwargs))
def _get_dataforseo_api_search(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchRun(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_dataforseo_api_search_json(**kwargs: Any) -> BaseTool:
return DataForSeoAPISearchResults(api_wrapper=DataForSeoAPIWrapper(**kwargs))
def _get_eleven_labs_text2speech(**kwargs: Any) -> BaseTool:
return ElevenLabsText2SpeechTool(**kwargs)
def _get_memorize(llm: BaseLanguageModel, **kwargs: Any) -> BaseTool:
return Memorize(llm=llm)
def _get_google_cloud_texttospeech(**kwargs: Any) -> BaseTool:
return GoogleCloudTextToSpeechTool(**kwargs)
def _get_reddit_search(**kwargs: Any) -> BaseTool:
return RedditSearchRun(api_wrapper=RedditSearchAPIWrapper(**kwargs))
_EXTRA_LLM_TOOLS: Dict[
str,
Tuple[Callable[[Arg(BaseLanguageModel, "llm"), KwArg(Any)], BaseTool], List[str]],
] = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
"podcast-api": (_get_podcast_api, ["listen_api_key"]),
"memorize": (_get_memorize, []),
}
_EXTRA_OPTIONAL_TOOLS: Dict[str, Tuple[Callable[[KwArg(Any)], BaseTool], List[str]]] = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"searx-search-results-json": (
_get_searx_search_results_json,
["searx_host", "engines", "num_results", "aiosession"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"metaphor-search": (_get_metaphor_search, ["metaphor_api_key"]),
"ddg-search": (_get_ddg_search, []),
"google-lens": (_get_google_lens, ["serp_api_key"]),
"google-serper": (_get_google_serper, ["serper_api_key", "aiosession"]),
"google-scholar": (
_get_google_scholar,
["top_k_results", "hl", "lr", "serp_api_key"],
),
"google-finance": (
_get_google_finance,
["serp_api_key"],
),
"google-trends": (
_get_google_trends,
["serp_api_key"],
),
"google-jobs": (
_get_google_jobs,
["serp_api_key"],
),
"google-serper-results-json": (
_get_google_serper_results_json,
["serper_api_key", "aiosession"],
),
"searchapi": (_get_searchapi, ["searchapi_api_key", "aiosession"]),
"searchapi-results-json": (
_get_searchapi_results_json,
["searchapi_api_key", "aiosession"],
),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"dalle-image-generator": (_get_dalle_image_generator, ["openai_api_key"]),
"twilio": (_get_twilio, ["account_sid", "auth_token", "from_number"]),
"searx-search": (_get_searx_search, ["searx_host", "engines", "aiosession"]),
"wikipedia": (_get_wikipedia, ["top_k_results", "lang"]),
"arxiv": (
_get_arxiv,
["top_k_results", "load_max_docs", "load_all_available_meta"],
),
"golden-query": (_get_golden_query, ["golden_api_key"]),
"pubmed": (_get_pubmed, ["top_k_results"]),
"human": (_get_human_tool, ["prompt_func", "input_func"]),
"awslambda": (
_get_lambda_api,
["awslambda_tool_name", "awslambda_tool_description", "function_name"],
),
"stackexchange": (_get_stackexchange, []),
"sceneXplain": (_get_scenexplain, []),
"graphql": (_get_graphql_tool, ["graphql_endpoint"]),
"openweathermap-api": (_get_openweathermap, ["openweathermap_api_key"]),
"dataforseo-api-search": (
_get_dataforseo_api_search,
["api_login", "api_password", "aiosession"],
),
"dataforseo-api-search-json": (
_get_dataforseo_api_search_json,
["api_login", "api_password", "aiosession"],
),
"eleven_labs_text2speech": (_get_eleven_labs_text2speech, ["eleven_api_key"]),
"google_cloud_texttospeech": (_get_google_cloud_texttospeech, []),
"reddit_search": (
_get_reddit_search,
["reddit_client_id", "reddit_client_secret", "reddit_user_agent"],
),
}
def _handle_callbacks(
callback_manager: Optional[BaseCallbackManager], callbacks: Callbacks
) -> Callbacks:
if callback_manager is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
if callbacks is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks arguments."
)
return callback_manager
return callbacks
def load_huggingface_tool(
task_or_repo_id: str,
model_repo_id: Optional[str] = None,
token: Optional[str] = None,
remote: bool = False,
**kwargs: Any,
) -> BaseTool:
"""Loads a tool from the HuggingFace Hub.
Args:
task_or_repo_id: Task or model repo id.
model_repo_id: Optional model repo id.
token: Optional token.
remote: Optional remote. Defaults to False.
**kwargs:
Returns:
A tool.
"""
try:
from transformers import load_tool
except ImportError:
raise ImportError(
"HuggingFace tools require the libraries `transformers>=4.29.0`"
" and `huggingface_hub>=0.14.1` to be installed."
" Please install it with"
" `pip install --upgrade transformers huggingface_hub`."
)
hf_tool = load_tool(
task_or_repo_id,
model_repo_id=model_repo_id,
token=token,
remote=remote,
**kwargs,
)
outputs = hf_tool.outputs
if set(outputs) != {"text"}:
raise NotImplementedError("Multimodal outputs not supported yet.")
inputs = hf_tool.inputs
if set(inputs) != {"text"}:
raise NotImplementedError("Multimodal inputs not supported yet.")
return Tool.from_function(
hf_tool.__call__, name=hf_tool.name, description=hf_tool.description
)
def load_tools(
tool_names: List[str],
llm: Optional[BaseLanguageModel] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Tools allow agents to interact with various resources and services like
APIs, databases, file systems, etc.
Please scope the permissions of each tools to the minimum required for the
application.
For example, if an application only needs to read from a database,
the database tool should not be given write permissions. Moreover
consider scoping the permissions to only allow accessing specific
tables and impose user-level quota for limiting resource usage.
Please read the APIs of the individual tools to determine which configuration
they support.
See [Security](https://python.langchain.com/docs/security) for more information.
Args:
tool_names: name of tools to load.
llm: An optional language model, may be needed to initialize certain tools.
callbacks: Optional callback manager or list of callback handlers.
If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
callbacks = _handle_callbacks(
callback_manager=kwargs.get("callback_manager"), callbacks=callbacks
)
for name in tool_names:
if name == "requests":
warnings.warn(
"tool name `requests` is deprecated - "
"please use `requests_all` or specify the requests method"
)
if name == "requests_all":
# expand requests into various methods
requests_method_tools = [
_tool for _tool in _BASE_TOOLS if _tool.startswith("requests_")
]
tool_names.extend(requests_method_tools)
elif name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
if callbacks is not None:
for tool in tools:
tool.callbacks = callbacks
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
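# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): loading a couple
# of registered tools by name. The OpenAI LLM is a hypothetical choice and
# assumes an API key is configured; "wikipedia" assumes the `wikipedia` package
# is installed.
#
#   from langchain.agents import load_tools, get_all_tool_names
#   from langchain.llms import OpenAI
#
#   llm = OpenAI(temperature=0)
#   tools = load_tools(["llm-math", "wikipedia"], llm=llm)
#   print([t.name for t in tools], len(get_all_tool_names()))
# ---------------------------------------------------------------------------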
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary[/](https://www.dictionaryapi.com/)), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/tools/__init__.py | """**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
<name> # Examples: BraveSearch, HumanInputRun
**Main helpers:**
.. code-block::
CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
"""
from typing import Any
from langchain.tools.base import BaseTool, StructuredTool, Tool, tool
# Used for internal purposes
_DEPRECATED_TOOLS = {"PythonAstREPLTool", "PythonREPLTool"}
def _import_ainetwork_app() -> Any:
from langchain.tools.ainetwork.app import AINAppOps
return AINAppOps
def _import_ainetwork_owner() -> Any:
from langchain.tools.ainetwork.owner import AINOwnerOps
return AINOwnerOps
def _import_ainetwork_rule() -> Any:
from langchain.tools.ainetwork.rule import AINRuleOps
return AINRuleOps
def _import_ainetwork_transfer() -> Any:
from langchain.tools.ainetwork.transfer import AINTransfer
return AINTransfer
def _import_ainetwork_value() -> Any:
from langchain.tools.ainetwork.value import AINValueOps
return AINValueOps
def _import_arxiv_tool() -> Any:
from langchain.tools.arxiv.tool import ArxivQueryRun
return ArxivQueryRun
def _import_azure_cognitive_services_AzureCogsFormRecognizerTool() -> Any:
from langchain.tools.azure_cognitive_services import AzureCogsFormRecognizerTool
return AzureCogsFormRecognizerTool
def _import_azure_cognitive_services_AzureCogsImageAnalysisTool() -> Any:
from langchain.tools.azure_cognitive_services import AzureCogsImageAnalysisTool
return AzureCogsImageAnalysisTool
def _import_azure_cognitive_services_AzureCogsSpeech2TextTool() -> Any:
from langchain.tools.azure_cognitive_services import AzureCogsSpeech2TextTool
return AzureCogsSpeech2TextTool
def _import_azure_cognitive_services_AzureCogsText2SpeechTool() -> Any:
from langchain.tools.azure_cognitive_services import AzureCogsText2SpeechTool
return AzureCogsText2SpeechTool
def _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool() -> Any:
from langchain.tools.azure_cognitive_services import (
AzureCogsTextAnalyticsHealthTool,
)
return AzureCogsTextAnalyticsHealthTool
def _import_bing_search_tool_BingSearchResults() -> Any:
from langchain.tools.bing_search.tool import BingSearchResults
return BingSearchResults
def _import_bing_search_tool_BingSearchRun() -> Any:
from langchain.tools.bing_search.tool import BingSearchRun
return BingSearchRun
def _import_brave_search_tool() -> Any:
from langchain.tools.brave_search.tool import BraveSearch
return BraveSearch
def _import_ddg_search_tool_DuckDuckGoSearchResults() -> Any:
from langchain.tools.ddg_search.tool import DuckDuckGoSearchResults
return DuckDuckGoSearchResults
def _import_ddg_search_tool_DuckDuckGoSearchRun() -> Any:
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
return DuckDuckGoSearchRun
def _import_edenai_EdenAiExplicitImageTool() -> Any:
from langchain.tools.edenai import EdenAiExplicitImageTool
return EdenAiExplicitImageTool
def _import_edenai_EdenAiObjectDetectionTool() -> Any:
from langchain.tools.edenai import EdenAiObjectDetectionTool
return EdenAiObjectDetectionTool
def _import_edenai_EdenAiParsingIDTool() -> Any:
from langchain.tools.edenai import EdenAiParsingIDTool
return EdenAiParsingIDTool
def _import_edenai_EdenAiParsingInvoiceTool() -> Any:
from langchain.tools.edenai import EdenAiParsingInvoiceTool
return EdenAiParsingInvoiceTool
def _import_edenai_EdenAiSpeechToTextTool() -> Any:
from langchain.tools.edenai import EdenAiSpeechToTextTool
return EdenAiSpeechToTextTool
def _import_edenai_EdenAiTextModerationTool() -> Any:
from langchain.tools.edenai import EdenAiTextModerationTool
return EdenAiTextModerationTool
def _import_edenai_EdenAiTextToSpeechTool() -> Any:
from langchain.tools.edenai import EdenAiTextToSpeechTool
return EdenAiTextToSpeechTool
def _import_edenai_EdenaiTool() -> Any:
from langchain.tools.edenai import EdenaiTool
return EdenaiTool
def _import_eleven_labs_text2speech() -> Any:
from langchain.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
return ElevenLabsText2SpeechTool
def _import_file_management_CopyFileTool() -> Any:
from langchain.tools.file_management import CopyFileTool
return CopyFileTool
def _import_file_management_DeleteFileTool() -> Any:
from langchain.tools.file_management import DeleteFileTool
return DeleteFileTool
def _import_file_management_FileSearchTool() -> Any:
from langchain.tools.file_management import FileSearchTool
return FileSearchTool
def _import_file_management_ListDirectoryTool() -> Any:
from langchain.tools.file_management import ListDirectoryTool
return ListDirectoryTool
def _import_file_management_MoveFileTool() -> Any:
from langchain.tools.file_management import MoveFileTool
return MoveFileTool
def _import_file_management_ReadFileTool() -> Any:
from langchain.tools.file_management import ReadFileTool
return ReadFileTool
def _import_file_management_WriteFileTool() -> Any:
from langchain.tools.file_management import WriteFileTool
return WriteFileTool
def _import_gmail_GmailCreateDraft() -> Any:
from langchain.tools.gmail import GmailCreateDraft
return GmailCreateDraft
def _import_gmail_GmailGetMessage() -> Any:
from langchain.tools.gmail import GmailGetMessage
return GmailGetMessage
def _import_gmail_GmailGetThread() -> Any:
from langchain.tools.gmail import GmailGetThread
return GmailGetThread
def _import_gmail_GmailSearch() -> Any:
from langchain.tools.gmail import GmailSearch
return GmailSearch
def _import_gmail_GmailSendMessage() -> Any:
from langchain.tools.gmail import GmailSendMessage
return GmailSendMessage
def _import_google_cloud_texttospeech() -> Any:
from langchain.tools.google_cloud.texttospeech import GoogleCloudTextToSpeechTool
return GoogleCloudTextToSpeechTool
def _import_google_places_tool() -> Any:
from langchain.tools.google_places.tool import GooglePlacesTool
return GooglePlacesTool
def _import_google_search_tool_GoogleSearchResults() -> Any:
from langchain.tools.google_search.tool import GoogleSearchResults
return GoogleSearchResults
def _import_google_search_tool_GoogleSearchRun() -> Any:
from langchain.tools.google_search.tool import GoogleSearchRun
return GoogleSearchRun
def _import_google_serper_tool_GoogleSerperResults() -> Any:
from langchain.tools.google_serper.tool import GoogleSerperResults
return GoogleSerperResults
def _import_google_serper_tool_GoogleSerperRun() -> Any:
from langchain.tools.google_serper.tool import GoogleSerperRun
return GoogleSerperRun
def _import_graphql_tool() -> Any:
from langchain.tools.graphql.tool import BaseGraphQLTool
return BaseGraphQLTool
def _import_human_tool() -> Any:
from langchain.tools.human.tool import HumanInputRun
return HumanInputRun
def _import_ifttt() -> Any:
from langchain.tools.ifttt import IFTTTWebhook
return IFTTTWebhook
def _import_interaction_tool() -> Any:
from langchain.tools.interaction.tool import StdInInquireTool
return StdInInquireTool
def _import_jira_tool() -> Any:
from langchain.tools.jira.tool import JiraAction
return JiraAction
def _import_json_tool_JsonGetValueTool() -> Any:
from langchain.tools.json.tool import JsonGetValueTool
return JsonGetValueTool
def _import_json_tool_JsonListKeysTool() -> Any:
from langchain.tools.json.tool import JsonListKeysTool
return JsonListKeysTool
def _import_metaphor_search() -> Any:
from langchain.tools.metaphor_search import MetaphorSearchResults
return MetaphorSearchResults
def _import_office365_create_draft_message() -> Any:
from langchain.tools.office365.create_draft_message import O365CreateDraftMessage
return O365CreateDraftMessage
def _import_office365_events_search() -> Any:
from langchain.tools.office365.events_search import O365SearchEvents
return O365SearchEvents
def _import_office365_messages_search() -> Any:
from langchain.tools.office365.messages_search import O365SearchEmails
return O365SearchEmails
def _import_office365_send_event() -> Any:
from langchain.tools.office365.send_event import O365SendEvent
return O365SendEvent
def _import_office365_send_message() -> Any:
from langchain.tools.office365.send_message import O365SendMessage
return O365SendMessage
def _import_office365_utils() -> Any:
from langchain.tools.office365.utils import authenticate
return authenticate
def _import_openapi_utils_api_models() -> Any:
from langchain.tools.openapi.utils.api_models import APIOperation
return APIOperation
def _import_openapi_utils_openapi_utils() -> Any:
from langchain.tools.openapi.utils.openapi_utils import OpenAPISpec
return OpenAPISpec
def _import_openweathermap_tool() -> Any:
from langchain.tools.openweathermap.tool import OpenWeatherMapQueryRun
return OpenWeatherMapQueryRun
def _import_playwright_ClickTool() -> Any:
from langchain.tools.playwright import ClickTool
return ClickTool
def _import_playwright_CurrentWebPageTool() -> Any:
from langchain.tools.playwright import CurrentWebPageTool
return CurrentWebPageTool
def _import_playwright_ExtractHyperlinksTool() -> Any:
from langchain.tools.playwright import ExtractHyperlinksTool
return ExtractHyperlinksTool
def _import_playwright_ExtractTextTool() -> Any:
from langchain.tools.playwright import ExtractTextTool
return ExtractTextTool
def _import_playwright_GetElementsTool() -> Any:
from langchain.tools.playwright import GetElementsTool
return GetElementsTool
def _import_playwright_NavigateBackTool() -> Any:
from langchain.tools.playwright import NavigateBackTool
return NavigateBackTool
def _import_playwright_NavigateTool() -> Any:
from langchain.tools.playwright import NavigateTool
return NavigateTool
def _import_plugin() -> Any:
from langchain.tools.plugin import AIPluginTool
return AIPluginTool
def _import_powerbi_tool_InfoPowerBITool() -> Any:
from langchain.tools.powerbi.tool import InfoPowerBITool
return InfoPowerBITool
def _import_powerbi_tool_ListPowerBITool() -> Any:
from langchain.tools.powerbi.tool import ListPowerBITool
return ListPowerBITool
def _import_powerbi_tool_QueryPowerBITool() -> Any:
from langchain.tools.powerbi.tool import QueryPowerBITool
return QueryPowerBITool
def _import_pubmed_tool() -> Any:
from langchain.tools.pubmed.tool import PubmedQueryRun
return PubmedQueryRun
def _import_python_tool_PythonAstREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_python_tool_PythonREPLTool() -> Any:
raise ImportError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
def _import_reddit_search_RedditSearchRun() -> Any:
from langchain.tools.reddit_search.tool import RedditSearchRun
return RedditSearchRun
def _import_render() -> Any:
from langchain.tools.render import format_tool_to_openai_function
return format_tool_to_openai_function
def _import_requests_tool_BaseRequestsTool() -> Any:
from langchain.tools.requests.tool import BaseRequestsTool
return BaseRequestsTool
def _import_requests_tool_RequestsDeleteTool() -> Any:
from langchain.tools.requests.tool import RequestsDeleteTool
return RequestsDeleteTool
def _import_requests_tool_RequestsGetTool() -> Any:
from langchain.tools.requests.tool import RequestsGetTool
return RequestsGetTool
def _import_requests_tool_RequestsPatchTool() -> Any:
from langchain.tools.requests.tool import RequestsPatchTool
return RequestsPatchTool
def _import_requests_tool_RequestsPostTool() -> Any:
from langchain.tools.requests.tool import RequestsPostTool
return RequestsPostTool
def _import_requests_tool_RequestsPutTool() -> Any:
from langchain.tools.requests.tool import RequestsPutTool
return RequestsPutTool
def _import_scenexplain_tool() -> Any:
from langchain.tools.scenexplain.tool import SceneXplainTool
return SceneXplainTool
def _import_searx_search_tool_SearxSearchResults() -> Any:
from langchain.tools.searx_search.tool import SearxSearchResults
return SearxSearchResults
def _import_searx_search_tool_SearxSearchRun() -> Any:
from langchain.tools.searx_search.tool import SearxSearchRun
return SearxSearchRun
def _import_shell_tool() -> Any:
from langchain.tools.shell.tool import ShellTool
return ShellTool
def _import_sleep_tool() -> Any:
from langchain.tools.sleep.tool import SleepTool
return SleepTool
def _import_spark_sql_tool_BaseSparkSQLTool() -> Any:
from langchain.tools.spark_sql.tool import BaseSparkSQLTool
return BaseSparkSQLTool
def _import_spark_sql_tool_InfoSparkSQLTool() -> Any:
from langchain.tools.spark_sql.tool import InfoSparkSQLTool
return InfoSparkSQLTool
def _import_spark_sql_tool_ListSparkSQLTool() -> Any:
from langchain.tools.spark_sql.tool import ListSparkSQLTool
return ListSparkSQLTool
def _import_spark_sql_tool_QueryCheckerTool() -> Any:
from langchain.tools.spark_sql.tool import QueryCheckerTool
return QueryCheckerTool
def _import_spark_sql_tool_QuerySparkSQLTool() -> Any:
from langchain.tools.spark_sql.tool import QuerySparkSQLTool
return QuerySparkSQLTool
def _import_sql_database_tool_BaseSQLDatabaseTool() -> Any:
from langchain.tools.sql_database.tool import BaseSQLDatabaseTool
return BaseSQLDatabaseTool
def _import_sql_database_tool_InfoSQLDatabaseTool() -> Any:
from langchain.tools.sql_database.tool import InfoSQLDatabaseTool
return InfoSQLDatabaseTool
def _import_sql_database_tool_ListSQLDatabaseTool() -> Any:
from langchain.tools.sql_database.tool import ListSQLDatabaseTool
return ListSQLDatabaseTool
def _import_sql_database_tool_QuerySQLCheckerTool() -> Any:
from langchain.tools.sql_database.tool import QuerySQLCheckerTool
return QuerySQLCheckerTool
def _import_sql_database_tool_QuerySQLDataBaseTool() -> Any:
from langchain.tools.sql_database.tool import QuerySQLDataBaseTool
return QuerySQLDataBaseTool
def _import_stackexchange_tool() -> Any:
from langchain.tools.stackexchange.tool import StackExchangeTool
return StackExchangeTool
def _import_steamship_image_generation() -> Any:
from langchain.tools.steamship_image_generation import SteamshipImageGenerationTool
return SteamshipImageGenerationTool
def _import_vectorstore_tool_VectorStoreQATool() -> Any:
from langchain.tools.vectorstore.tool import VectorStoreQATool
return VectorStoreQATool
def _import_vectorstore_tool_VectorStoreQAWithSourcesTool() -> Any:
from langchain.tools.vectorstore.tool import VectorStoreQAWithSourcesTool
return VectorStoreQAWithSourcesTool
def _import_wikipedia_tool() -> Any:
from langchain.tools.wikipedia.tool import WikipediaQueryRun
return WikipediaQueryRun
def _import_wolfram_alpha_tool() -> Any:
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
return WolframAlphaQueryRun
def _import_yahoo_finance_news() -> Any:
from langchain.tools.yahoo_finance_news import YahooFinanceNewsTool
return YahooFinanceNewsTool
def _import_youtube_search() -> Any:
from langchain.tools.youtube.search import YouTubeSearchTool
return YouTubeSearchTool
def _import_zapier_tool_ZapierNLAListActions() -> Any:
from langchain.tools.zapier.tool import ZapierNLAListActions
return ZapierNLAListActions
def _import_zapier_tool_ZapierNLARunAction() -> Any:
from langchain.tools.zapier.tool import ZapierNLARunAction
return ZapierNLARunAction
def _import_bearly_tool() -> Any:
from langchain.tools.bearly.tool import BearlyInterpreterTool
return BearlyInterpreterTool
def _import_e2b_data_analysis() -> Any:
from langchain.tools.e2b_data_analysis.tool import E2BDataAnalysisTool
return E2BDataAnalysisTool
def __getattr__(name: str) -> Any:
if name == "AINAppOps":
return _import_ainetwork_app()
elif name == "AINOwnerOps":
return _import_ainetwork_owner()
elif name == "AINRuleOps":
return _import_ainetwork_rule()
elif name == "AINTransfer":
return _import_ainetwork_transfer()
elif name == "AINValueOps":
return _import_ainetwork_value()
elif name == "ArxivQueryRun":
return _import_arxiv_tool()
elif name == "AzureCogsFormRecognizerTool":
return _import_azure_cognitive_services_AzureCogsFormRecognizerTool()
elif name == "AzureCogsImageAnalysisTool":
return _import_azure_cognitive_services_AzureCogsImageAnalysisTool()
elif name == "AzureCogsSpeech2TextTool":
return _import_azure_cognitive_services_AzureCogsSpeech2TextTool()
elif name == "AzureCogsText2SpeechTool":
return _import_azure_cognitive_services_AzureCogsText2SpeechTool()
elif name == "AzureCogsTextAnalyticsHealthTool":
return _import_azure_cognitive_services_AzureCogsTextAnalyticsHealthTool()
elif name == "BingSearchResults":
return _import_bing_search_tool_BingSearchResults()
elif name == "BingSearchRun":
return _import_bing_search_tool_BingSearchRun()
elif name == "BraveSearch":
return _import_brave_search_tool()
elif name == "DuckDuckGoSearchResults":
return _import_ddg_search_tool_DuckDuckGoSearchResults()
elif name == "DuckDuckGoSearchRun":
return _import_ddg_search_tool_DuckDuckGoSearchRun()
elif name == "EdenAiExplicitImageTool":
return _import_edenai_EdenAiExplicitImageTool()
elif name == "EdenAiObjectDetectionTool":
return _import_edenai_EdenAiObjectDetectionTool()
elif name == "EdenAiParsingIDTool":
return _import_edenai_EdenAiParsingIDTool()
elif name == "EdenAiParsingInvoiceTool":
return _import_edenai_EdenAiParsingInvoiceTool()
elif name == "EdenAiSpeechToTextTool":
return _import_edenai_EdenAiSpeechToTextTool()
elif name == "EdenAiTextModerationTool":
return _import_edenai_EdenAiTextModerationTool()
elif name == "EdenAiTextToSpeechTool":
return _import_edenai_EdenAiTextToSpeechTool()
elif name == "EdenaiTool":
return _import_edenai_EdenaiTool()
elif name == "ElevenLabsText2SpeechTool":
return _import_eleven_labs_text2speech()
elif name == "CopyFileTool":
return _import_file_management_CopyFileTool()
elif name == "DeleteFileTool":
return _import_file_management_DeleteFileTool()
elif name == "FileSearchTool":
return _import_file_management_FileSearchTool()
elif name == "ListDirectoryTool":
return _import_file_management_ListDirectoryTool()
elif name == "MoveFileTool":
return _import_file_management_MoveFileTool()
elif name == "ReadFileTool":
return _import_file_management_ReadFileTool()
elif name == "WriteFileTool":
return _import_file_management_WriteFileTool()
elif name == "GmailCreateDraft":
return _import_gmail_GmailCreateDraft()
elif name == "GmailGetMessage":
return _import_gmail_GmailGetMessage()
elif name == "GmailGetThread":
return _import_gmail_GmailGetThread()
elif name == "GmailSearch":
return _import_gmail_GmailSearch()
elif name == "GmailSendMessage":
return _import_gmail_GmailSendMessage()
elif name == "GoogleCloudTextToSpeechTool":
return _import_google_cloud_texttospeech()
elif name == "GooglePlacesTool":
return _import_google_places_tool()
elif name == "GoogleSearchResults":
return _import_google_search_tool_GoogleSearchResults()
elif name == "GoogleSearchRun":
return _import_google_search_tool_GoogleSearchRun()
elif name == "GoogleSerperResults":
return _import_google_serper_tool_GoogleSerperResults()
elif name == "GoogleSerperRun":
return _import_google_serper_tool_GoogleSerperRun()
elif name == "BaseGraphQLTool":
return _import_graphql_tool()
elif name == "HumanInputRun":
return _import_human_tool()
elif name == "IFTTTWebhook":
return _import_ifttt()
elif name == "StdInInquireTool":
return _import_interaction_tool()
elif name == "JiraAction":
return _import_jira_tool()
elif name == "JsonGetValueTool":
return _import_json_tool_JsonGetValueTool()
elif name == "JsonListKeysTool":
return _import_json_tool_JsonListKeysTool()
elif name == "MetaphorSearchResults":
return _import_metaphor_search()
elif name == "O365CreateDraftMessage":
return _import_office365_create_draft_message()
elif name == "O365SearchEvents":
return _import_office365_events_search()
elif name == "O365SearchEmails":
return _import_office365_messages_search()
elif name == "O365SendEvent":
return _import_office365_send_event()
elif name == "O365SendMessage":
return _import_office365_send_message()
elif name == "authenticate":
return _import_office365_utils()
elif name == "APIOperation":
return _import_openapi_utils_api_models()
elif name == "OpenAPISpec":
return _import_openapi_utils_openapi_utils()
elif name == "OpenWeatherMapQueryRun":
return _import_openweathermap_tool()
elif name == "ClickTool":
return _import_playwright_ClickTool()
elif name == "CurrentWebPageTool":
return _import_playwright_CurrentWebPageTool()
elif name == "ExtractHyperlinksTool":
return _import_playwright_ExtractHyperlinksTool()
elif name == "ExtractTextTool":
return _import_playwright_ExtractTextTool()
elif name == "GetElementsTool":
return _import_playwright_GetElementsTool()
elif name == "NavigateBackTool":
return _import_playwright_NavigateBackTool()
elif name == "NavigateTool":
return _import_playwright_NavigateTool()
elif name == "AIPluginTool":
return _import_plugin()
elif name == "InfoPowerBITool":
return _import_powerbi_tool_InfoPowerBITool()
elif name == "ListPowerBITool":
return _import_powerbi_tool_ListPowerBITool()
elif name == "QueryPowerBITool":
return _import_powerbi_tool_QueryPowerBITool()
elif name == "PubmedQueryRun":
return _import_pubmed_tool()
elif name == "PythonAstREPLTool":
return _import_python_tool_PythonAstREPLTool()
elif name == "PythonREPLTool":
return _import_python_tool_PythonREPLTool()
elif name == "RedditSearchRun":
return _import_reddit_search_RedditSearchRun()
elif name == "format_tool_to_openai_function":
return _import_render()
elif name == "BaseRequestsTool":
return _import_requests_tool_BaseRequestsTool()
elif name == "RequestsDeleteTool":
return _import_requests_tool_RequestsDeleteTool()
elif name == "RequestsGetTool":
return _import_requests_tool_RequestsGetTool()
elif name == "RequestsPatchTool":
return _import_requests_tool_RequestsPatchTool()
elif name == "RequestsPostTool":
return _import_requests_tool_RequestsPostTool()
elif name == "RequestsPutTool":
return _import_requests_tool_RequestsPutTool()
elif name == "SceneXplainTool":
return _import_scenexplain_tool()
elif name == "SearxSearchResults":
return _import_searx_search_tool_SearxSearchResults()
elif name == "SearxSearchRun":
return _import_searx_search_tool_SearxSearchRun()
elif name == "ShellTool":
return _import_shell_tool()
elif name == "SleepTool":
return _import_sleep_tool()
elif name == "BaseSparkSQLTool":
return _import_spark_sql_tool_BaseSparkSQLTool()
elif name == "InfoSparkSQLTool":
return _import_spark_sql_tool_InfoSparkSQLTool()
elif name == "ListSparkSQLTool":
return _import_spark_sql_tool_ListSparkSQLTool()
elif name == "QueryCheckerTool":
return _import_spark_sql_tool_QueryCheckerTool()
elif name == "QuerySparkSQLTool":
return _import_spark_sql_tool_QuerySparkSQLTool()
elif name == "BaseSQLDatabaseTool":
return _import_sql_database_tool_BaseSQLDatabaseTool()
elif name == "InfoSQLDatabaseTool":
return _import_sql_database_tool_InfoSQLDatabaseTool()
elif name == "ListSQLDatabaseTool":
return _import_sql_database_tool_ListSQLDatabaseTool()
elif name == "QuerySQLCheckerTool":
return _import_sql_database_tool_QuerySQLCheckerTool()
elif name == "QuerySQLDataBaseTool":
return _import_sql_database_tool_QuerySQLDataBaseTool()
elif name == "StackExchangeTool":
return _import_stackexchange_tool()
elif name == "SteamshipImageGenerationTool":
return _import_steamship_image_generation()
elif name == "VectorStoreQATool":
return _import_vectorstore_tool_VectorStoreQATool()
elif name == "VectorStoreQAWithSourcesTool":
return _import_vectorstore_tool_VectorStoreQAWithSourcesTool()
elif name == "WikipediaQueryRun":
return _import_wikipedia_tool()
elif name == "WolframAlphaQueryRun":
return _import_wolfram_alpha_tool()
elif name == "YahooFinanceNewsTool":
return _import_yahoo_finance_news()
elif name == "YouTubeSearchTool":
return _import_youtube_search()
elif name == "ZapierNLAListActions":
return _import_zapier_tool_ZapierNLAListActions()
elif name == "ZapierNLARunAction":
return _import_zapier_tool_ZapierNLARunAction()
elif name == "BearlyInterpreterTool":
return _import_bearly_tool()
elif name == "E2BDataAnalysisTool":
return _import_e2b_data_analysis()
else:
raise AttributeError(f"Could not find: {name}")
__all__ = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MetaphorSearchResults",
"MoveFileTool",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"authenticate",
"format_tool_to_openai_function",
"tool",
]
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
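For illustration, here is a minimal sketch of how such a tool's wrapper might query the Collegiate Dictionary endpoint. The endpoint shape follows the public v3 JSON interface, but the function name and return format below are hypothetical placeholders rather than the actual implementation:

```python
import requests

# Hypothetical helper, not the actual tool: queries the Merriam-Webster
# Collegiate Dictionary v3 JSON endpoint and collects the short definitions.
MW_COLLEGIATE_URL = "https://www.dictionaryapi.com/api/v3/references/collegiate/json/{word}"


def lookup_definitions(word: str, api_key: str) -> list[str]:
    response = requests.get(
        MW_COLLEGIATE_URL.format(word=word), params={"key": api_key}, timeout=10
    )
    response.raise_for_status()
    definitions: list[str] = []
    for entry in response.json():
        # When the word is unknown, the API returns plain-string spelling
        # suggestions instead of entry objects, so skip those.
        if isinstance(entry, dict):
            definitions.extend(entry.get("shortdef", []))
    return definitions
```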
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/tools/merriam_webster/__init__.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/tools/merriam_webster/tool.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/utilities/__init__.py | """**Utilities** are the integrations with third-part systems and packages.
Other LangChain classes use **Utilities** to interact with third-part systems
and packages.
"""
from typing import Any
from langchain.utilities.requests import Requests, RequestsWrapper, TextRequestsWrapper
def _import_alpha_vantage() -> Any:
from langchain.utilities.alpha_vantage import AlphaVantageAPIWrapper
return AlphaVantageAPIWrapper
def _import_apify() -> Any:
from langchain.utilities.apify import ApifyWrapper
return ApifyWrapper
def _import_arcee() -> Any:
from langchain.utilities.arcee import ArceeWrapper
return ArceeWrapper
def _import_arxiv() -> Any:
from langchain.utilities.arxiv import ArxivAPIWrapper
return ArxivAPIWrapper
def _import_awslambda() -> Any:
from langchain.utilities.awslambda import LambdaWrapper
return LambdaWrapper
def _import_bibtex() -> Any:
from langchain.utilities.bibtex import BibtexparserWrapper
return BibtexparserWrapper
def _import_bing_search() -> Any:
from langchain.utilities.bing_search import BingSearchAPIWrapper
return BingSearchAPIWrapper
def _import_brave_search() -> Any:
from langchain.utilities.brave_search import BraveSearchWrapper
return BraveSearchWrapper
def _import_duckduckgo_search() -> Any:
from langchain.utilities.duckduckgo_search import DuckDuckGoSearchAPIWrapper
return DuckDuckGoSearchAPIWrapper
def _import_golden_query() -> Any:
from langchain.utilities.golden_query import GoldenQueryAPIWrapper
return GoldenQueryAPIWrapper
def _import_google_lens() -> Any:
from langchain.utilities.google_lens import GoogleLensAPIWrapper
return GoogleLensAPIWrapper
def _import_google_places_api() -> Any:
from langchain.utilities.google_places_api import GooglePlacesAPIWrapper
return GooglePlacesAPIWrapper
def _import_google_jobs() -> Any:
from langchain.utilities.google_jobs import GoogleJobsAPIWrapper
return GoogleJobsAPIWrapper
def _import_google_scholar() -> Any:
from langchain.utilities.google_scholar import GoogleScholarAPIWrapper
return GoogleScholarAPIWrapper
def _import_google_trends() -> Any:
from langchain.utilities.google_trends import GoogleTrendsAPIWrapper
return GoogleTrendsAPIWrapper
def _import_google_finance() -> Any:
from langchain.utilities.google_finance import GoogleFinanceAPIWrapper
return GoogleFinanceAPIWrapper
def _import_google_search() -> Any:
from langchain.utilities.google_search import GoogleSearchAPIWrapper
return GoogleSearchAPIWrapper
def _import_google_serper() -> Any:
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
return GoogleSerperAPIWrapper
def _import_graphql() -> Any:
from langchain.utilities.graphql import GraphQLAPIWrapper
return GraphQLAPIWrapper
def _import_jira() -> Any:
from langchain.utilities.jira import JiraAPIWrapper
return JiraAPIWrapper
def _import_max_compute() -> Any:
from langchain.utilities.max_compute import MaxComputeAPIWrapper
return MaxComputeAPIWrapper
def _import_metaphor_search() -> Any:
from langchain.utilities.metaphor_search import MetaphorSearchAPIWrapper
return MetaphorSearchAPIWrapper
def _import_openweathermap() -> Any:
from langchain.utilities.openweathermap import OpenWeatherMapAPIWrapper
return OpenWeatherMapAPIWrapper
def _import_outline() -> Any:
from langchain.utilities.outline import OutlineAPIWrapper
return OutlineAPIWrapper
def _import_portkey() -> Any:
from langchain.utilities.portkey import Portkey
return Portkey
def _import_powerbi() -> Any:
from langchain.utilities.powerbi import PowerBIDataset
return PowerBIDataset
def _import_pubmed() -> Any:
from langchain.utilities.pubmed import PubMedAPIWrapper
return PubMedAPIWrapper
def _import_python() -> Any:
from langchain.utilities.python import PythonREPL
return PythonREPL
def _import_scenexplain() -> Any:
from langchain.utilities.scenexplain import SceneXplainAPIWrapper
return SceneXplainAPIWrapper
def _import_searchapi() -> Any:
from langchain.utilities.searchapi import SearchApiAPIWrapper
return SearchApiAPIWrapper
def _import_searx_search() -> Any:
from langchain.utilities.searx_search import SearxSearchWrapper
return SearxSearchWrapper
def _import_serpapi() -> Any:
from langchain.utilities.serpapi import SerpAPIWrapper
return SerpAPIWrapper
def _import_spark_sql() -> Any:
from langchain.utilities.spark_sql import SparkSQL
return SparkSQL
def _import_sql_database() -> Any:
from langchain.utilities.sql_database import SQLDatabase
return SQLDatabase
def _import_stackexchange() -> Any:
from langchain.utilities.stackexchange import StackExchangeAPIWrapper
return StackExchangeAPIWrapper
def _import_tensorflow_datasets() -> Any:
from langchain.utilities.tensorflow_datasets import TensorflowDatasets
return TensorflowDatasets
def _import_twilio() -> Any:
from langchain.utilities.twilio import TwilioAPIWrapper
return TwilioAPIWrapper
def _import_wikipedia() -> Any:
from langchain.utilities.wikipedia import WikipediaAPIWrapper
return WikipediaAPIWrapper
def _import_wolfram_alpha() -> Any:
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
return WolframAlphaAPIWrapper
def _import_zapier() -> Any:
from langchain.utilities.zapier import ZapierNLAWrapper
return ZapierNLAWrapper
def __getattr__(name: str) -> Any:
if name == "AlphaVantageAPIWrapper":
return _import_alpha_vantage()
elif name == "ApifyWrapper":
return _import_apify()
elif name == "ArceeWrapper":
return _import_arcee()
elif name == "ArxivAPIWrapper":
return _import_arxiv()
elif name == "LambdaWrapper":
return _import_awslambda()
elif name == "BibtexparserWrapper":
return _import_bibtex()
elif name == "BingSearchAPIWrapper":
return _import_bing_search()
elif name == "BraveSearchWrapper":
return _import_brave_search()
elif name == "DuckDuckGoSearchAPIWrapper":
return _import_duckduckgo_search()
elif name == "GoogleLensAPIWrapper":
return _import_google_lens()
elif name == "GoldenQueryAPIWrapper":
return _import_golden_query()
elif name == "GoogleJobsAPIWrapper":
return _import_google_jobs()
elif name == "GoogleScholarAPIWrapper":
return _import_google_scholar()
elif name == "GoogleFinanceAPIWrapper":
return _import_google_finance()
elif name == "GoogleTrendsAPIWrapper":
return _import_google_trends()
elif name == "GooglePlacesAPIWrapper":
return _import_google_places_api()
elif name == "GoogleSearchAPIWrapper":
return _import_google_search()
elif name == "GoogleSerperAPIWrapper":
return _import_google_serper()
elif name == "GraphQLAPIWrapper":
return _import_graphql()
elif name == "JiraAPIWrapper":
return _import_jira()
elif name == "MaxComputeAPIWrapper":
return _import_max_compute()
elif name == "MetaphorSearchAPIWrapper":
return _import_metaphor_search()
elif name == "OpenWeatherMapAPIWrapper":
return _import_openweathermap()
elif name == "OutlineAPIWrapper":
return _import_outline()
elif name == "Portkey":
return _import_portkey()
elif name == "PowerBIDataset":
return _import_powerbi()
elif name == "PubMedAPIWrapper":
return _import_pubmed()
elif name == "PythonREPL":
return _import_python()
elif name == "SceneXplainAPIWrapper":
return _import_scenexplain()
elif name == "SearchApiAPIWrapper":
return _import_searchapi()
elif name == "SearxSearchWrapper":
return _import_searx_search()
elif name == "SerpAPIWrapper":
return _import_serpapi()
elif name == "SparkSQL":
return _import_spark_sql()
elif name == "StackExchangeAPIWrapper":
return _import_stackexchange()
elif name == "SQLDatabase":
return _import_sql_database()
elif name == "TensorflowDatasets":
return _import_tensorflow_datasets()
elif name == "TwilioAPIWrapper":
return _import_twilio()
elif name == "WikipediaAPIWrapper":
return _import_wikipedia()
elif name == "WolframAlphaAPIWrapper":
return _import_wolfram_alpha()
elif name == "ZapierNLAWrapper":
return _import_zapier()
else:
raise AttributeError(f"Could not find: {name}")
__all__ = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GoogleFinanceAPIWrapper",
"GoogleLensAPIWrapper",
"GoogleJobsAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleTrendsAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GraphQLAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MetaphorSearchAPIWrapper",
"OpenWeatherMapAPIWrapper",
"OutlineAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"PythonREPL",
"Requests",
"RequestsWrapper",
"SQLDatabase",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"StackExchangeAPIWrapper",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"ZapierNLAWrapper",
]
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/langchain/utilities/merriam_webster.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/tests/integration_tests/utilities/test_merriam_webster_api.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/tests/unit_tests/tools/test_imports.py | from langchain.tools import __all__
EXPECTED_ALL = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MetaphorSearchResults",
"MoveFileTool",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SleepTool",
"StackExchangeTool",
"StdInInquireTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"authenticate",
"format_tool_to_openai_function",
"tool",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/tests/unit_tests/tools/test_public_api.py | """Test the public API of the tools package."""
from langchain.tools import __all__ as public_api
_EXPECTED = [
"AINAppOps",
"AINOwnerOps",
"AINRuleOps",
"AINTransfer",
"AINValueOps",
"AIPluginTool",
"APIOperation",
"ArxivQueryRun",
"AzureCogsFormRecognizerTool",
"AzureCogsImageAnalysisTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
"BaseGraphQLTool",
"BaseRequestsTool",
"BaseSQLDatabaseTool",
"BaseSparkSQLTool",
"BaseTool",
"BearlyInterpreterTool",
"BingSearchResults",
"BingSearchRun",
"BraveSearch",
"ClickTool",
"CopyFileTool",
"CurrentWebPageTool",
"DeleteFileTool",
"DuckDuckGoSearchResults",
"DuckDuckGoSearchRun",
"E2BDataAnalysisTool",
"EdenAiExplicitImageTool",
"EdenAiObjectDetectionTool",
"EdenAiParsingIDTool",
"EdenAiParsingInvoiceTool",
"EdenAiSpeechToTextTool",
"EdenAiTextModerationTool",
"EdenAiTextToSpeechTool",
"EdenaiTool",
"ElevenLabsText2SpeechTool",
"ExtractHyperlinksTool",
"ExtractTextTool",
"FileSearchTool",
"GetElementsTool",
"GmailCreateDraft",
"GmailGetMessage",
"GmailGetThread",
"GmailSearch",
"GmailSendMessage",
"GoogleCloudTextToSpeechTool",
"GooglePlacesTool",
"GoogleSearchResults",
"GoogleSearchRun",
"GoogleSerperResults",
"GoogleSerperRun",
"HumanInputRun",
"IFTTTWebhook",
"InfoPowerBITool",
"InfoSQLDatabaseTool",
"InfoSparkSQLTool",
"JiraAction",
"JsonGetValueTool",
"JsonListKeysTool",
"ListDirectoryTool",
"ListPowerBITool",
"ListSQLDatabaseTool",
"ListSparkSQLTool",
"MetaphorSearchResults",
"MoveFileTool",
"NavigateBackTool",
"NavigateTool",
"O365CreateDraftMessage",
"O365SearchEmails",
"O365SearchEvents",
"O365SendEvent",
"O365SendMessage",
"OpenAPISpec",
"OpenWeatherMapQueryRun",
"PubmedQueryRun",
"RedditSearchRun",
"QueryCheckerTool",
"QueryPowerBITool",
"QuerySQLCheckerTool",
"QuerySQLDataBaseTool",
"QuerySparkSQLTool",
"ReadFileTool",
"RequestsDeleteTool",
"RequestsGetTool",
"RequestsPatchTool",
"RequestsPostTool",
"RequestsPutTool",
"SceneXplainTool",
"SearxSearchResults",
"SearxSearchRun",
"ShellTool",
"SleepTool",
"StdInInquireTool",
"StackExchangeTool",
"SteamshipImageGenerationTool",
"StructuredTool",
"Tool",
"VectorStoreQATool",
"VectorStoreQAWithSourcesTool",
"WikipediaQueryRun",
"WolframAlphaQueryRun",
"WriteFileTool",
"YahooFinanceNewsTool",
"YouTubeSearchTool",
"ZapierNLAListActions",
"ZapierNLARunAction",
"authenticate",
"format_tool_to_openai_function",
"tool",
]
def test_public_api() -> None:
"""Test for regressions or changes in the public API."""
# Check that the public API is as expected
assert set(public_api) == set(_EXPECTED)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 12,039 | Tools for Dictionary APIs | ### Feature request
It would be nice to have agents that could access dictionary APIs such as the Merriam-Webster API or Urban Dictionary API (for slang).
### Motivation
It can be useful to be able to look up definitions for words using a dictionary to provide additional context. Since no dictionary tools are currently available, it would be beneficial to have at least one implemented.
### Your contribution
We will open a PR that adds a new tool for accessing the Merriam-Webster Collegiate Dictionary API (https://dictionaryapi.com/products/api-collegiate-dictionary), which provides definitions for English words, as soon as possible. In the future this could be extended to support other Merriam-Webster APIs such as their Medical Dictionary API (https://dictionaryapi.com/products/api-medical-dictionary) or Spanish-English Dictionary API (https://dictionaryapi.com/products/api-spanish-dictionary).
We may also open another PR for Urban Dictionary API integration. | https://github.com/langchain-ai/langchain/issues/12039 | https://github.com/langchain-ai/langchain/pull/12044 | f3dd4a10cffd507a1300abf0f7729e95072f44eb | c2e3963da4b7c6650fc37acfa8ea39a355e7dae9 | "2023-10-19T18:31:45Z" | python | "2023-11-30T01:28:29Z" | libs/langchain/tests/unit_tests/utilities/test_imports.py | from langchain.utilities import __all__
EXPECTED_ALL = [
"AlphaVantageAPIWrapper",
"ApifyWrapper",
"ArceeWrapper",
"ArxivAPIWrapper",
"BibtexparserWrapper",
"BingSearchAPIWrapper",
"BraveSearchWrapper",
"DuckDuckGoSearchAPIWrapper",
"GoldenQueryAPIWrapper",
"GoogleFinanceAPIWrapper",
"GoogleJobsAPIWrapper",
"GoogleLensAPIWrapper",
"GooglePlacesAPIWrapper",
"GoogleScholarAPIWrapper",
"GoogleSearchAPIWrapper",
"GoogleSerperAPIWrapper",
"GoogleTrendsAPIWrapper",
"GraphQLAPIWrapper",
"JiraAPIWrapper",
"LambdaWrapper",
"MaxComputeAPIWrapper",
"MetaphorSearchAPIWrapper",
"OpenWeatherMapAPIWrapper",
"OutlineAPIWrapper",
"Portkey",
"PowerBIDataset",
"PubMedAPIWrapper",
"PythonREPL",
"Requests",
"RequestsWrapper",
"SQLDatabase",
"SceneXplainAPIWrapper",
"SearchApiAPIWrapper",
"SearxSearchWrapper",
"SerpAPIWrapper",
"SparkSQL",
"StackExchangeAPIWrapper",
"TensorflowDatasets",
"TextRequestsWrapper",
"TwilioAPIWrapper",
"WikipediaAPIWrapper",
"WolframAlphaAPIWrapper",
"ZapierNLAWrapper",
]
def test_all_imports() -> None:
assert set(__all__) == set(EXPECTED_ALL)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,127 | Volc Engine MaaS has wrong entry in LLM type to class dict (causing SpaCy to not work with LangChain anymore) | ### System Info
* Windows 11 Home (build 22621.2715)
* Python 3.12.0
* Clean virtual environment using Poetry with following dependencies:
```
python = "3.12.0"
langchain = "0.0.344"
spacy = "3.7.2"
spacy-llm = "0.6.4"
```
### Who can help?
@h3l As the creator of the pull request where VolcEngine was introduced
@baskaryan As tag handler of that pull request
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
Anything that triggers spaCy's registry to make an inventory, for example:
```python
import spacy
spacy.blank("en")
```
With the last part of the Traceback being:
```
File "PROJECT_FOLDER\.venv\Lib\site-packages\langchain\llms\__init__.py", line 699, in __getattr__
k: v() for k, v in get_type_to_cls_dict().items()
^^^
File "PROJECT_FOLDER\.venv\Lib\site-packages\langchain_core\load\serializable.py", line 97, in __init__
super().__init__(**kwargs)
File "PROJECT_FOLDER\.venv\Lib\site-packages\pydantic\v1\main.py", line 341, in __init__
raise validation_error
pydantic.v1.error_wrappers.ValidationError: 1 validation error for VolcEngineMaasLLM
__root__
Did not find volc_engine_maas_ak, please add an environment variable `VOLC_ACCESSKEY` which contains it, or pass `volc_engine_maas_ak` as a named parameter. (type=value_error)
```
#### What I think causes this
I am quite certain that this is caused by [`langchain.llms.__init__.py:869 (for commit b161f30)`](https://github.com/langchain-ai/langchain/blob/b161f302ff56a14d8d0331cbec4a3efa23d06e1a/libs/langchain/langchain/llms/__init__.py#L869C51-L869C51):
```python
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
...
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
# The line below is the only one that actually calls the import function, returning a class instead of an import function
"VolcEngineMaasLLM": _import_volcengine_maas(),
}
```
The Volc Engine Maas LLM entry is the only one in this dict that actually calls its import function; all the other entries hold the import function itself and do not call it.
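To make the failure mode concrete, here is a minimal, self-contained sketch with stand-in classes (this is not the real langchain or pydantic code) showing why a class value in this dict fails as soon as the backwards-compatibility shim builds `type_to_cls_dict`:

```python
class OtherLLM:
    """Stand-in for an LLM whose dict entry is an import *function* (the normal case)."""


class VolcLikeLLM:
    """Stand-in for VolcEngineMaasLLM; instantiating it fails without credentials."""

    def __init__(self) -> None:
        raise ValueError("Did not find volc_engine_maas_ak, please add VOLC_ACCESSKEY")


def _import_other():
    return OtherLLM


def _import_volc_like():
    return VolcLikeLLM


def get_type_to_cls_dict():
    return {
        "other": _import_other,       # value is the import function (correct)
        "volc": _import_volc_like(),  # value is already the class (the bug)
    }


# The backwards-compatibility branch in langchain.llms.__getattr__ then does
# the equivalent of calling every value:
try:
    type_to_cls_dict = {k: v() for k, v in get_type_to_cls_dict().items()}
except ValueError as exc:
    # "other": _import_other() returns the class -> fine.
    # "volc":  v is already the class, so v() *instantiates* it and validation fails.
    print(f"Instantiation during dict build failed: {exc}")
```

Because `v()` is applied uniformly to every value, the single entry that already holds the class gets instantiated eagerly, which matches the validation error in the traceback above.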
### Expected behavior
Class to type dict only returns import functions, not actual classes:
```python
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
...
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
# What I think would be correct (now without function call)
"VolcEngineMaasLLM": _import_volcengine_maas,
}
```
Unfortunately I don't have time to put in a PR myself, but I hope this helps in finding the solution!
| https://github.com/langchain-ai/langchain/issues/14127 | https://github.com/langchain-ai/langchain/pull/14194 | 6ae0194dc70119d8b05a0624a6cc4950f9f84608 | 818252b1f8b9ac9af6bb80d43b21c5e95d6b2e11 | "2023-12-01T13:58:13Z" | python | "2023-12-03T16:43:23Z" | libs/langchain/langchain/llms/__init__.py | """
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun,
CallbackManager, AsyncCallbackManager,
AIMessage, BaseMessage
""" # noqa: E501
from typing import Any, Callable, Dict, Type
from langchain.llms.base import BaseLLM
def _import_ai21() -> Any:
from langchain.llms.ai21 import AI21
return AI21
def _import_aleph_alpha() -> Any:
from langchain.llms.aleph_alpha import AlephAlpha
return AlephAlpha
def _import_amazon_api_gateway() -> Any:
from langchain.llms.amazon_api_gateway import AmazonAPIGateway
return AmazonAPIGateway
def _import_anthropic() -> Any:
from langchain.llms.anthropic import Anthropic
return Anthropic
def _import_anyscale() -> Any:
from langchain.llms.anyscale import Anyscale
return Anyscale
def _import_arcee() -> Any:
from langchain.llms.arcee import Arcee
return Arcee
def _import_aviary() -> Any:
from langchain.llms.aviary import Aviary
return Aviary
def _import_azureml_endpoint() -> Any:
from langchain.llms.azureml_endpoint import AzureMLOnlineEndpoint
return AzureMLOnlineEndpoint
def _import_baidu_qianfan_endpoint() -> Any:
from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint
return QianfanLLMEndpoint
def _import_bananadev() -> Any:
from langchain.llms.bananadev import Banana
return Banana
def _import_baseten() -> Any:
from langchain.llms.baseten import Baseten
return Baseten
def _import_beam() -> Any:
from langchain.llms.beam import Beam
return Beam
def _import_bedrock() -> Any:
from langchain.llms.bedrock import Bedrock
return Bedrock
def _import_bittensor() -> Any:
from langchain.llms.bittensor import NIBittensorLLM
return NIBittensorLLM
def _import_cerebriumai() -> Any:
from langchain.llms.cerebriumai import CerebriumAI
return CerebriumAI
def _import_chatglm() -> Any:
from langchain.llms.chatglm import ChatGLM
return ChatGLM
def _import_clarifai() -> Any:
from langchain.llms.clarifai import Clarifai
return Clarifai
def _import_cohere() -> Any:
from langchain.llms.cohere import Cohere
return Cohere
def _import_ctransformers() -> Any:
from langchain.llms.ctransformers import CTransformers
return CTransformers
def _import_ctranslate2() -> Any:
from langchain.llms.ctranslate2 import CTranslate2
return CTranslate2
def _import_databricks() -> Any:
from langchain.llms.databricks import Databricks
return Databricks
def _import_databricks_chat() -> Any:
from langchain.chat_models.databricks import ChatDatabricks
return ChatDatabricks
def _import_deepinfra() -> Any:
from langchain.llms.deepinfra import DeepInfra
return DeepInfra
def _import_deepsparse() -> Any:
from langchain.llms.deepsparse import DeepSparse
return DeepSparse
def _import_edenai() -> Any:
from langchain.llms.edenai import EdenAI
return EdenAI
def _import_fake() -> Any:
from langchain.llms.fake import FakeListLLM
return FakeListLLM
def _import_fireworks() -> Any:
from langchain.llms.fireworks import Fireworks
return Fireworks
def _import_forefrontai() -> Any:
from langchain.llms.forefrontai import ForefrontAI
return ForefrontAI
def _import_gigachat() -> Any:
from langchain.llms.gigachat import GigaChat
return GigaChat
def _import_google_palm() -> Any:
from langchain.llms.google_palm import GooglePalm
return GooglePalm
def _import_gooseai() -> Any:
from langchain.llms.gooseai import GooseAI
return GooseAI
def _import_gpt4all() -> Any:
from langchain.llms.gpt4all import GPT4All
return GPT4All
def _import_gradient_ai() -> Any:
from langchain.llms.gradient_ai import GradientLLM
return GradientLLM
def _import_huggingface_endpoint() -> Any:
from langchain.llms.huggingface_endpoint import HuggingFaceEndpoint
return HuggingFaceEndpoint
def _import_huggingface_hub() -> Any:
from langchain.llms.huggingface_hub import HuggingFaceHub
return HuggingFaceHub
def _import_huggingface_pipeline() -> Any:
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
return HuggingFacePipeline
def _import_huggingface_text_gen_inference() -> Any:
from langchain.llms.huggingface_text_gen_inference import (
HuggingFaceTextGenInference,
)
return HuggingFaceTextGenInference
def _import_human() -> Any:
from langchain.llms.human import HumanInputLLM
return HumanInputLLM
def _import_javelin_ai_gateway() -> Any:
from langchain.llms.javelin_ai_gateway import JavelinAIGateway
return JavelinAIGateway
def _import_koboldai() -> Any:
from langchain.llms.koboldai import KoboldApiLLM
return KoboldApiLLM
def _import_llamacpp() -> Any:
from langchain.llms.llamacpp import LlamaCpp
return LlamaCpp
def _import_manifest() -> Any:
from langchain.llms.manifest import ManifestWrapper
return ManifestWrapper
def _import_minimax() -> Any:
from langchain.llms.minimax import Minimax
return Minimax
def _import_mlflow() -> Any:
from langchain.llms.mlflow import Mlflow
return Mlflow
def _import_mlflow_chat() -> Any:
from langchain.chat_models.mlflow import ChatMlflow
return ChatMlflow
def _import_mlflow_ai_gateway() -> Any:
from langchain.llms.mlflow_ai_gateway import MlflowAIGateway
return MlflowAIGateway
def _import_modal() -> Any:
from langchain.llms.modal import Modal
return Modal
def _import_mosaicml() -> Any:
from langchain.llms.mosaicml import MosaicML
return MosaicML
def _import_nlpcloud() -> Any:
from langchain.llms.nlpcloud import NLPCloud
return NLPCloud
def _import_octoai_endpoint() -> Any:
from langchain.llms.octoai_endpoint import OctoAIEndpoint
return OctoAIEndpoint
def _import_ollama() -> Any:
from langchain.llms.ollama import Ollama
return Ollama
def _import_opaqueprompts() -> Any:
from langchain.llms.opaqueprompts import OpaquePrompts
return OpaquePrompts
def _import_azure_openai() -> Any:
from langchain.llms.openai import AzureOpenAI
return AzureOpenAI
def _import_openai() -> Any:
from langchain.llms.openai import OpenAI
return OpenAI
def _import_openai_chat() -> Any:
from langchain.llms.openai import OpenAIChat
return OpenAIChat
def _import_openllm() -> Any:
from langchain.llms.openllm import OpenLLM
return OpenLLM
def _import_openlm() -> Any:
from langchain.llms.openlm import OpenLM
return OpenLM
def _import_pai_eas_endpoint() -> Any:
from langchain.llms.pai_eas_endpoint import PaiEasEndpoint
return PaiEasEndpoint
def _import_petals() -> Any:
from langchain.llms.petals import Petals
return Petals
def _import_pipelineai() -> Any:
from langchain.llms.pipelineai import PipelineAI
return PipelineAI
def _import_predibase() -> Any:
from langchain.llms.predibase import Predibase
return Predibase
def _import_predictionguard() -> Any:
from langchain.llms.predictionguard import PredictionGuard
return PredictionGuard
def _import_promptlayer() -> Any:
from langchain.llms.promptlayer_openai import PromptLayerOpenAI
return PromptLayerOpenAI
def _import_promptlayer_chat() -> Any:
from langchain.llms.promptlayer_openai import PromptLayerOpenAIChat
return PromptLayerOpenAIChat
def _import_replicate() -> Any:
from langchain.llms.replicate import Replicate
return Replicate
def _import_rwkv() -> Any:
from langchain.llms.rwkv import RWKV
return RWKV
def _import_sagemaker_endpoint() -> Any:
from langchain.llms.sagemaker_endpoint import SagemakerEndpoint
return SagemakerEndpoint
def _import_self_hosted() -> Any:
from langchain.llms.self_hosted import SelfHostedPipeline
return SelfHostedPipeline
def _import_self_hosted_hugging_face() -> Any:
from langchain.llms.self_hosted_hugging_face import SelfHostedHuggingFaceLLM
return SelfHostedHuggingFaceLLM
def _import_stochasticai() -> Any:
from langchain.llms.stochasticai import StochasticAI
return StochasticAI
def _import_symblai_nebula() -> Any:
from langchain.llms.symblai_nebula import Nebula
return Nebula
def _import_textgen() -> Any:
from langchain.llms.textgen import TextGen
return TextGen
def _import_titan_takeoff() -> Any:
from langchain.llms.titan_takeoff import TitanTakeoff
return TitanTakeoff
def _import_titan_takeoff_pro() -> Any:
from langchain.llms.titan_takeoff_pro import TitanTakeoffPro
return TitanTakeoffPro
def _import_together() -> Any:
from langchain.llms.together import Together
return Together
def _import_tongyi() -> Any:
from langchain.llms.tongyi import Tongyi
return Tongyi
def _import_vertex() -> Any:
from langchain.llms.vertexai import VertexAI
return VertexAI
def _import_vertex_model_garden() -> Any:
from langchain.llms.vertexai import VertexAIModelGarden
return VertexAIModelGarden
def _import_vllm() -> Any:
from langchain.llms.vllm import VLLM
return VLLM
def _import_vllm_openai() -> Any:
from langchain.llms.vllm import VLLMOpenAI
return VLLMOpenAI
def _import_watsonxllm() -> Any:
from langchain.llms.watsonxllm import WatsonxLLM
return WatsonxLLM
def _import_writer() -> Any:
from langchain.llms.writer import Writer
return Writer
def _import_xinference() -> Any:
from langchain.llms.xinference import Xinference
return Xinference
def _import_yandex_gpt() -> Any:
from langchain.llms.yandex import YandexGPT
return YandexGPT
def _import_volcengine_maas() -> Any:
from langchain.llms.volcengine_maas import VolcEngineMaasLLM
return VolcEngineMaasLLM
def __getattr__(name: str) -> Any:
if name == "AI21":
return _import_ai21()
elif name == "AlephAlpha":
return _import_aleph_alpha()
elif name == "AmazonAPIGateway":
return _import_amazon_api_gateway()
elif name == "Anthropic":
return _import_anthropic()
elif name == "Anyscale":
return _import_anyscale()
elif name == "Arcee":
return _import_arcee()
elif name == "Aviary":
return _import_aviary()
elif name == "AzureMLOnlineEndpoint":
return _import_azureml_endpoint()
elif name == "QianfanLLMEndpoint":
return _import_baidu_qianfan_endpoint()
elif name == "Banana":
return _import_bananadev()
elif name == "Baseten":
return _import_baseten()
elif name == "Beam":
return _import_beam()
elif name == "Bedrock":
return _import_bedrock()
elif name == "NIBittensorLLM":
return _import_bittensor()
elif name == "CerebriumAI":
return _import_cerebriumai()
elif name == "ChatGLM":
return _import_chatglm()
elif name == "Clarifai":
return _import_clarifai()
elif name == "Cohere":
return _import_cohere()
elif name == "CTransformers":
return _import_ctransformers()
elif name == "CTranslate2":
return _import_ctranslate2()
elif name == "Databricks":
return _import_databricks()
elif name == "DeepInfra":
return _import_deepinfra()
elif name == "DeepSparse":
return _import_deepsparse()
elif name == "EdenAI":
return _import_edenai()
elif name == "FakeListLLM":
return _import_fake()
elif name == "Fireworks":
return _import_fireworks()
elif name == "ForefrontAI":
return _import_forefrontai()
elif name == "GigaChat":
return _import_gigachat()
elif name == "GooglePalm":
return _import_google_palm()
elif name == "GooseAI":
return _import_gooseai()
elif name == "GPT4All":
return _import_gpt4all()
elif name == "GradientLLM":
return _import_gradient_ai()
elif name == "HuggingFaceEndpoint":
return _import_huggingface_endpoint()
elif name == "HuggingFaceHub":
return _import_huggingface_hub()
elif name == "HuggingFacePipeline":
return _import_huggingface_pipeline()
elif name == "HuggingFaceTextGenInference":
return _import_huggingface_text_gen_inference()
elif name == "HumanInputLLM":
return _import_human()
elif name == "JavelinAIGateway":
return _import_javelin_ai_gateway()
elif name == "KoboldApiLLM":
return _import_koboldai()
elif name == "LlamaCpp":
return _import_llamacpp()
elif name == "ManifestWrapper":
return _import_manifest()
elif name == "Minimax":
return _import_minimax()
elif name == "Mlflow":
return _import_mlflow()
elif name == "MlflowAIGateway":
return _import_mlflow_ai_gateway()
elif name == "Modal":
return _import_modal()
elif name == "MosaicML":
return _import_mosaicml()
elif name == "NLPCloud":
return _import_nlpcloud()
elif name == "OctoAIEndpoint":
return _import_octoai_endpoint()
elif name == "Ollama":
return _import_ollama()
elif name == "OpaquePrompts":
return _import_opaqueprompts()
elif name == "AzureOpenAI":
return _import_azure_openai()
elif name == "OpenAI":
return _import_openai()
elif name == "OpenAIChat":
return _import_openai_chat()
elif name == "OpenLLM":
return _import_openllm()
elif name == "OpenLM":
return _import_openlm()
elif name == "PaiEasEndpoint":
return _import_pai_eas_endpoint()
elif name == "Petals":
return _import_petals()
elif name == "PipelineAI":
return _import_pipelineai()
elif name == "Predibase":
return _import_predibase()
elif name == "PredictionGuard":
return _import_predictionguard()
elif name == "PromptLayerOpenAI":
return _import_promptlayer()
elif name == "PromptLayerOpenAIChat":
return _import_promptlayer_chat()
elif name == "Replicate":
return _import_replicate()
elif name == "RWKV":
return _import_rwkv()
elif name == "SagemakerEndpoint":
return _import_sagemaker_endpoint()
elif name == "SelfHostedPipeline":
return _import_self_hosted()
elif name == "SelfHostedHuggingFaceLLM":
return _import_self_hosted_hugging_face()
elif name == "StochasticAI":
return _import_stochasticai()
elif name == "Nebula":
return _import_symblai_nebula()
elif name == "TextGen":
return _import_textgen()
elif name == "TitanTakeoff":
return _import_titan_takeoff()
elif name == "TitanTakeoffPro":
return _import_titan_takeoff_pro()
elif name == "Together":
return _import_together()
elif name == "Tongyi":
return _import_tongyi()
elif name == "VertexAI":
return _import_vertex()
elif name == "VertexAIModelGarden":
return _import_vertex_model_garden()
elif name == "VLLM":
return _import_vllm()
elif name == "VLLMOpenAI":
return _import_vllm_openai()
elif name == "WatsonxLLM":
return _import_watsonxllm()
elif name == "Writer":
return _import_writer()
elif name == "Xinference":
return _import_xinference()
elif name == "YandexGPT":
return _import_yandex_gpt()
elif name == "VolcEngineMaasLLM":
return _import_volcengine_maas()
elif name == "type_to_cls_dict":
# for backwards compatibility
type_to_cls_dict: Dict[str, Type[BaseLLM]] = {
k: v() for k, v in get_type_to_cls_dict().items()
}
return type_to_cls_dict
else:
raise AttributeError(f"Could not find: {name}")
__all__ = [
"AI21",
"AlephAlpha",
"AmazonAPIGateway",
"Anthropic",
"Anyscale",
"Arcee",
"Aviary",
"AzureMLOnlineEndpoint",
"AzureOpenAI",
"Banana",
"Baseten",
"Beam",
"Bedrock",
"CTransformers",
"CTranslate2",
"CerebriumAI",
"ChatGLM",
"Clarifai",
"Cohere",
"Databricks",
"DeepInfra",
"DeepSparse",
"EdenAI",
"FakeListLLM",
"Fireworks",
"ForefrontAI",
"GigaChat",
"GPT4All",
"GooglePalm",
"GooseAI",
"GradientLLM",
"HuggingFaceEndpoint",
"HuggingFaceHub",
"HuggingFacePipeline",
"HuggingFaceTextGenInference",
"HumanInputLLM",
"KoboldApiLLM",
"LlamaCpp",
"TextGen",
"ManifestWrapper",
"Minimax",
"MlflowAIGateway",
"Modal",
"MosaicML",
"Nebula",
"NIBittensorLLM",
"NLPCloud",
"Ollama",
"OpenAI",
"OpenAIChat",
"OpenLLM",
"OpenLM",
"PaiEasEndpoint",
"Petals",
"PipelineAI",
"Predibase",
"PredictionGuard",
"PromptLayerOpenAI",
"PromptLayerOpenAIChat",
"OpaquePrompts",
"RWKV",
"Replicate",
"SagemakerEndpoint",
"SelfHostedHuggingFaceLLM",
"SelfHostedPipeline",
"StochasticAI",
"TitanTakeoff",
"TitanTakeoffPro",
"Tongyi",
"VertexAI",
"VertexAIModelGarden",
"VLLM",
"VLLMOpenAI",
"WatsonxLLM",
"Writer",
"OctoAIEndpoint",
"Xinference",
"JavelinAIGateway",
"QianfanLLMEndpoint",
"YandexGPT",
"VolcEngineMaasLLM",
]
def get_type_to_cls_dict() -> Dict[str, Callable[[], Type[BaseLLM]]]:
return {
"ai21": _import_ai21,
"aleph_alpha": _import_aleph_alpha,
"amazon_api_gateway": _import_amazon_api_gateway,
"amazon_bedrock": _import_bedrock,
"anthropic": _import_anthropic,
"anyscale": _import_anyscale,
"arcee": _import_arcee,
"aviary": _import_aviary,
"azure": _import_azure_openai,
"azureml_endpoint": _import_azureml_endpoint,
"bananadev": _import_bananadev,
"baseten": _import_baseten,
"beam": _import_beam,
"cerebriumai": _import_cerebriumai,
"chat_glm": _import_chatglm,
"clarifai": _import_clarifai,
"cohere": _import_cohere,
"ctransformers": _import_ctransformers,
"ctranslate2": _import_ctranslate2,
"databricks": _import_databricks,
"databricks-chat": _import_databricks_chat,
"deepinfra": _import_deepinfra,
"deepsparse": _import_deepsparse,
"edenai": _import_edenai,
"fake-list": _import_fake,
"forefrontai": _import_forefrontai,
"giga-chat-model": _import_gigachat,
"google_palm": _import_google_palm,
"gooseai": _import_gooseai,
"gradient": _import_gradient_ai,
"gpt4all": _import_gpt4all,
"huggingface_endpoint": _import_huggingface_endpoint,
"huggingface_hub": _import_huggingface_hub,
"huggingface_pipeline": _import_huggingface_pipeline,
"huggingface_textgen_inference": _import_huggingface_text_gen_inference,
"human-input": _import_human,
"koboldai": _import_koboldai,
"llamacpp": _import_llamacpp,
"textgen": _import_textgen,
"minimax": _import_minimax,
"mlflow": _import_mlflow,
"mlflow-chat": _import_mlflow_chat,
"mlflow-ai-gateway": _import_mlflow_ai_gateway,
"modal": _import_modal,
"mosaic": _import_mosaicml,
"nebula": _import_symblai_nebula,
"nibittensor": _import_bittensor,
"nlpcloud": _import_nlpcloud,
"ollama": _import_ollama,
"openai": _import_openai,
"openlm": _import_openlm,
"pai_eas_endpoint": _import_pai_eas_endpoint,
"petals": _import_petals,
"pipelineai": _import_pipelineai,
"predibase": _import_predibase,
"opaqueprompts": _import_opaqueprompts,
"replicate": _import_replicate,
"rwkv": _import_rwkv,
"sagemaker_endpoint": _import_sagemaker_endpoint,
"self_hosted": _import_self_hosted,
"self_hosted_hugging_face": _import_self_hosted_hugging_face,
"stochasticai": _import_stochasticai,
"together": _import_together,
"tongyi": _import_tongyi,
"titan_takeoff": _import_titan_takeoff,
"titan_takeoff_pro": _import_titan_takeoff_pro,
"vertexai": _import_vertex,
"vertexai_model_garden": _import_vertex_model_garden,
"openllm": _import_openllm,
"openllm_client": _import_openllm,
"vllm": _import_vllm,
"vllm_openai": _import_vllm_openai,
"watsonxllm": _import_watsonxllm,
"writer": _import_writer,
"xinference": _import_xinference,
"javelin-ai-gateway": _import_javelin_ai_gateway,
"qianfan_endpoint": _import_baidu_qianfan_endpoint,
"yandex_gpt": _import_yandex_gpt,
"VolcEngineMaasLLM": _import_volcengine_maas(),
}
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,069 | AzureOpenAI azure_ad_token_provider Keyerror | ### System Info
When I use the below snippet of code
```
import os
from azure.identity import DefaultAzureCredential
from azure.identity import get_bearer_token_provider
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id,
interactive_browser_client_id=client_id,
client_secret=client_secret)
token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
endpoint = "https://xxxx.openai.azure.com"
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
```
I get this error:
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[36], line 21
18 # api_version = "2023-05-15"
19 endpoint = "https://xxxx.openai.azure.com"
---> 21 client = AzureOpenAI(
22 azure_endpoint=endpoint,
23 api_version="2023-05-15",
24 azure_deployment="example-gpt-4",
25 azure_ad_token_provider=token_provider,
26 )
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs)
96 def __init__(self, **kwargs: Any) -> None:
---> 97 super().__init__(**kwargs)
98 self._lc_kwargs = kwargs
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data)
333 """
334 Create a new model by parsing and validating input data from keyword arguments.
335
336 Raises ValidationError if the input data cannot be parsed to form a valid model.
337 """
338 # Uses something other than `self` the first arg to allow "self" as a settable attribute
--> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
340 if validation_error:
341 raise validation_error
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls)
1100 continue
1101 try:
-> 1102 values = validator(cls_, values)
1103 except (ValueError, TypeError, AssertionError) as exc:
1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values)
877 values["openai_api_base"] += (
878 "/deployments/" + values["deployment_name"]
879 )
880 values["deployment_name"] = None
881 client_params = {
882 "api_version": values["openai_api_version"],
883 "azure_endpoint": values["azure_endpoint"],
884 "azure_deployment": values["deployment_name"],
885 "api_key": values["openai_api_key"],
886 "azure_ad_token": values["azure_ad_token"],
--> 887 "azure_ad_token_provider": values["azure_ad_token_provider"],
888 "organization": values["openai_organization"],
889 "base_url": values["openai_api_base"],
890 "timeout": values["request_timeout"],
891 "max_retries": values["max_retries"],
892 "default_headers": values["default_headers"],
893 "default_query": values["default_query"],
894 "http_client": values["http_client"],
895 }
896 values["client"] = openai.AzureOpenAI(**client_params).completions
897 values["async_client"] = openai.AsyncAzureOpenAI(
898 **client_params
899 ).completions
KeyError: 'azure_ad_token_provider'
```
I've also tried AzureChatOpenAI, and I get the same error back.
The error is not reproduced when I use the openai library's AzureOpenAI directly.
Also, in the openai library azure_ad_token_provider is typed as 'AzureADTokenProvider | None' = None, while in langchain it is azure_ad_token_provider: Optional[str] = None, which makes me wonder whether it needs to accept something other than a string to work.
Any ideas on how to fix this? I'm actually using Azure service principal authentication, and if I use the alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get a token that expires after 60 minutes, which does not happen with the bearer token provider, so it is important to me to make the token_provider work.
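For reference, here is a minimal sketch of how I think the field would need to be typed for a callable to survive pydantic validation. This is an assumption about a possible fix, not the current langchain code; the class and field names below are illustrative only.

```python
from typing import Callable, Optional

from pydantic import BaseModel


class AzureClientParamsSketch(BaseModel):
    # Hypothetical field: typed as a zero-argument callable (matching the openai
    # SDK's AzureADTokenProvider) instead of Optional[str], so the result of
    # get_bearer_token_provider(...) is not silently dropped during validation.
    azure_ad_token_provider: Optional[Callable[[], str]] = None


params = AzureClientParamsSketch(azure_ad_token_provider=lambda: "fake-token")
print(callable(params.azure_ad_token_provider))  # True
```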
libraries :
pydantic 1.10.12
pydantic_core 2.10.1
openai 1.2.0
langchain 0.0.342
langchain-core 0.0.7
### Who can help?
@hwchase17 @agola11
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
import os
from azure.identity import DefaultAzureCredential
from azure.identity import get_bearer_token_provider
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id,
interactive_browser_client_id=client_id,
client_secret=client_secret)
token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
endpoint = "https://xxxx.openai.azure.com"
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
### Expected behavior
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
should return a Runnable instance which I can use for LLMChain | https://github.com/langchain-ai/langchain/issues/14069 | https://github.com/langchain-ai/langchain/pull/14166 | 9938086df07d69d24f9770209ea9087d3b906155 | 62505043be20cf8af491e30785a6ca0eeb1d276e | "2023-11-30T13:39:55Z" | python | "2023-12-03T16:55:25Z" | libs/langchain/langchain/chat_models/azure_openai.py | """Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
import os
import warnings
from typing import Any, Dict, Union
from langchain_core.outputs import ChatResult
from langchain_core.pydantic_v1 import BaseModel, Field, root_validator
from langchain.chat_models.openai import ChatOpenAI
from langchain.utils import get_from_dict_or_env
from langchain.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
class AzureChatOpenAI(ChatOpenAI):
"""`Azure OpenAI` Chat Completion API.
To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``AZURE_OPENAI_API_KEY``
- ``AZURE_OPENAI_API_ENDPOINT``
- ``AZURE_OPENAI_AD_TOKEN``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
azure_deployment="35-turbo-dev",
openai_api_version="2023-05-15",
)
Be aware the API version may change.
You can also specify the version of the model using ``model_version`` constructor
parameter, as Azure OpenAI doesn't return model version with the response.
Default is empty. When you specify the version, it will be appended to the
model name in the response. Setting correct version will help you to calculate the
cost properly. Model version is not validated, so make sure you set it correctly
to get the correct cost.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str = Field(default="", alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
""" # noqa: E501
azure_ad_token_provider: Union[str, None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
model_version: str = ""
"""Legacy, for openai<1.0.0 support."""
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION"
)
# Check OPENAI_ORGANIZATION for backwards compatibility.
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
values["openai_proxy"] = get_from_dict_or_env(
values, "openai_proxy", "OPENAI_PROXY", default=""
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
)
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
if values["deployment_name"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment_name"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` "
"(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment_name"]
)
values["deployment_name"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).chat.completions
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params
).chat.completions
else:
values["client"] = openai.ChatCompletion
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
if is_openai_v1():
return super()._default_params
else:
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the config params used for the openai client."""
if is_openai_v1():
return super()._client_params
else:
return {
**super()._client_params,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError(
"Azure has not provided the response due to a content filter "
"being triggered"
)
chat_result = super()._create_chat_result(response)
if "model" in response:
model = response["model"]
if self.model_version:
model = f"{model}-{self.model_version}"
if chat_result.llm_output is not None and isinstance(
chat_result.llm_output, dict
):
chat_result.llm_output["model_name"] = model
return chat_result
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,069 | AzureOpenAI azure_ad_token_provider Keyerror | ### System Info
When I use below snippet of code
```
import os
from azure.identity import DefaultAzureCredential
from azure.identity import get_bearer_token_provider
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id,
interactive_browser_client_id=client_id,
client_secret=client_secret)
token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
endpoint = "https://xxxx.openai.azure.com"
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
```
I get this error:
```---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[36], line 21
18 # api_version = "2023-05-15"
19 endpoint = "https://xxxx.openai.azure.com"
---> 21 client = AzureOpenAI(
22 azure_endpoint=endpoint,
23 api_version="2023-05-15",
24 azure_deployment="example-gpt-4",
25 azure_ad_token_provider=token_provider,
26 )
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain_core/load/serializable.py:97, in Serializable.__init__(self, **kwargs)
96 def __init__(self, **kwargs: Any) -> None:
---> 97 super().__init__(**kwargs)
98 self._lc_kwargs = kwargs
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:339, in BaseModel.__init__(__pydantic_self__, **data)
333 """
334 Create a new model by parsing and validating input data from keyword arguments.
335
336 Raises ValidationError if the input data cannot be parsed to form a valid model.
337 """
338 # Uses something other than `self` the first arg to allow "self" as a settable attribute
--> 339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
340 if validation_error:
341 raise validation_error
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/pydantic/v1/main.py:1102, in validate_model(model, input_data, cls)
1100 continue
1101 try:
-> 1102 values = validator(cls_, values)
1103 except (ValueError, TypeError, AssertionError) as exc:
1104 errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
File ~/PycharmProjects/aicc/env/lib/python3.9/site-packages/langchain/llms/openai.py:887, in AzureOpenAI.validate_environment(cls, values)
877 values["openai_api_base"] += (
878 "/deployments/" + values["deployment_name"]
879 )
880 values["deployment_name"] = None
881 client_params = {
882 "api_version": values["openai_api_version"],
883 "azure_endpoint": values["azure_endpoint"],
884 "azure_deployment": values["deployment_name"],
885 "api_key": values["openai_api_key"],
886 "azure_ad_token": values["azure_ad_token"],
--> 887 "azure_ad_token_provider": values["azure_ad_token_provider"],
888 "organization": values["openai_organization"],
889 "base_url": values["openai_api_base"],
890 "timeout": values["request_timeout"],
891 "max_retries": values["max_retries"],
892 "default_headers": values["default_headers"],
893 "default_query": values["default_query"],
894 "http_client": values["http_client"],
895 }
896 values["client"] = openai.AzureOpenAI(**client_params).completions
897 values["async_client"] = openai.AsyncAzureOpenAI(
898 **client_params
899 ).completions
KeyError: 'azure_ad_token_provider'
```
I've also tried AzureChatOpenAI, and I get the same error back.
The error is not reproduced when I use the openai library's AzureOpenAI directly.
Also, in the openai library azure_ad_token_provider is typed as 'AzureADTokenProvider | None' = None, while in langchain it is azure_ad_token_provider: Optional[str] = None, which makes me wonder whether it needs to accept something other than a string to work.
Any ideas on how to fix this? I'm actually using Azure service principal authentication, and if I use the alternative field azure_ad_token = credential.get_token("https://cognitiveservices.azure.com/.default").token I get a token that expires after 60 minutes, which does not happen with the bearer token provider, so it is important to me to make the token_provider work.
libraries :
pydantic 1.10.12
pydantic_core 2.10.1
openai 1.2.0
langchain 0.0.342
langchain-core 0.0.7
### Who can help?
@hwchase17 @agola11
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
import os
from azure.identity import DefaultAzureCredential
from azure.identity import get_bearer_token_provider
from langchain.llms import AzureOpenAI
from langchain.chat_models import AzureChatOpenAI
credential = DefaultAzureCredential(interactive_browser_tenant_id=tenant_id,
interactive_browser_client_id=client_id,
client_secret=client_secret)
token_provider = get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default")
endpoint = "https://xxxx.openai.azure.com"
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
### Expected behavior
client = AzureOpenAI( azure_endpoint=endpoint,
api_version="2023-05-15",
azure_deployment="example-gpt-4",
azure_ad_token_provider=token_provider)
should return a Runnable instance which I can use for LLMChain | https://github.com/langchain-ai/langchain/issues/14069 | https://github.com/langchain-ai/langchain/pull/14166 | 9938086df07d69d24f9770209ea9087d3b906155 | 62505043be20cf8af491e30785a6ca0eeb1d276e | "2023-11-30T13:39:55Z" | python | "2023-12-03T16:55:25Z" | libs/langchain/langchain/llms/openai.py | from __future__ import annotations
import logging
import os
import sys
import warnings
from typing import (
AbstractSet,
Any,
AsyncIterator,
Callable,
Collection,
Dict,
Iterator,
List,
Literal,
Mapping,
Optional,
Set,
Tuple,
Union,
)
from langchain_core.outputs import Generation, GenerationChunk, LLMResult
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.utils import get_pydantic_field_names
from langchain_core.utils.utils import build_extra_kwargs
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM, create_base_retry_decorator
from langchain.utils import get_from_dict_or_env
from langchain.utils.openai import is_openai_v1
logger = logging.getLogger(__name__)
def update_token_usage(
keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
"""Update token usage."""
_keys_to_use = keys.intersection(response["usage"])
for _key in _keys_to_use:
if _key not in token_usage:
token_usage[_key] = response["usage"][_key]
else:
token_usage[_key] += response["usage"][_key]
def _stream_response_to_generation_chunk(
stream_response: Dict[str, Any],
) -> GenerationChunk:
"""Convert a stream response to a generation chunk."""
if not stream_response["choices"]:
return GenerationChunk(text="")
return GenerationChunk(
text=stream_response["choices"][0]["text"],
generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None),
logprobs=stream_response["choices"][0].get("logprobs", None),
),
)
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
"""Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None
)
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]
def _streaming_response_template() -> Dict[str, Any]:
return {
"choices": [
{
"text": "",
"finish_reason": None,
"logprobs": None,
}
]
}
def _create_retry_decorator(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[
Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
] = None,
) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(
error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
)
def completion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
if is_openai_v1():
return llm.client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
return _completion_with_retry(**kwargs)
async def acompletion_with_retry(
llm: Union[BaseOpenAI, OpenAIChat],
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
if is_openai_v1():
return await llm.async_client.create(**kwargs)
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
class BaseOpenAI(BaseLLM):
"""Base OpenAI large language model class."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {"openai_api_key": "OPENAI_API_KEY"}
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.openai_api_base:
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization:
attributes["openai_organization"] = self.openai_organization
if self.openai_proxy:
attributes["openai_proxy"] = self.openai_proxy
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
return True
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="text-davinci-003", alias="model")
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
the models maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
best_of: int = 1
"""Generates best_of completions server-side and returns the "best"."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
openai_organization: Optional[str] = Field(default=None, alias="organization")
"""Automatically inferred from env var `OPENAI_ORG_ID` if not provided."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
batch_size: int = 20
"""Batch size to use when passing multiple documents to generate."""
request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
default=None, alias="timeout"
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
max_retries: int = 2
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
tiktoken_model_name: Optional[str] = None
"""The model name to pass to tiktoken when using this class.
Tiktoken is used to count the number of tokens in documents to constrain
them to be under a certain limit. By default, when set to None, this will
be the same as the embedding model name. However, there are some cases
where you may want to use this Embedding class with a model name not
supported by tiktoken. This can include when using Azure embeddings or
when using one of the many model providers that expose an OpenAI-like
API but with different models. In those cases, in order to avoid erroring
when tiktoken is called, you can specify a model name to use here."""
default_headers: Union[Mapping[str, str], None] = None
default_query: Union[Mapping[str, object], None] = None
# Configure a custom httpx client. See the
# [httpx documentation](https://www.python-httpx.org/api/#client) for more details.
http_client: Union[Any, None] = None
"""Optional httpx.Client."""
def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]: # type: ignore
"""Initialize the OpenAI object."""
model_name = data.get("model_name", "")
if (
model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4")
) and "-instruct" not in model_name:
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return OpenAIChat(**data)
return super().__new__(cls)
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
values["model_kwargs"] = build_extra_kwargs(
extra, values, all_required_field_names
)
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
client_params = {
"api_key": values["openai_api_key"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
if not values.get("client"):
values["client"] = openai.OpenAI(**client_params).completions
if not values.get("async_client"):
values["async_client"] = openai.AsyncOpenAI(**client_params).completions
elif not values.get("client"):
values["client"] = openai.Completion
else:
pass
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
normal_params: Dict[str, Any] = {
"temperature": self.temperature,
"top_p": self.top_p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
if self.max_tokens is not None:
normal_params["max_tokens"] = self.max_tokens
if self.request_timeout is not None and not is_openai_v1():
normal_params["request_timeout"] = self.request_timeout
# Azure gpt-35-turbo doesn't support best_of
# don't specify best_of if it is 1
if self.best_of > 1:
normal_params["best_of"] = self.best_of
return {**normal_params, **self.model_kwargs}
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
for stream_resp in completion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
params = {**self._invocation_params, **kwargs, "stream": True}
self.get_sub_prompts(params, [prompt], stop) # this mutates params
async for stream_resp in await acompletion_with_retry(
self, prompt=prompt, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
chunk = _stream_response_to_generation_chunk(stream_resp)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"]
if chunk.generation_info
else None,
)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint with k unique prompts.
Args:
prompts: The prompts to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The full LLM output.
Example:
.. code-block:: python
response = openai.generate(["Tell me a joke."])
"""
# TODO: write a unit test for this
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = completion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
# V1 client returns the response in an PyDantic object instead of
# dict. For the transition period, we deep convert it to dict.
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
if not system_fingerprint:
system_fingerprint = response.get("system_fingerprint")
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Call out to OpenAI's endpoint async with k unique prompts."""
params = self._invocation_params
params = {**params, **kwargs}
sub_prompts = self.get_sub_prompts(params, prompts, stop)
choices = []
token_usage: Dict[str, int] = {}
# Get the token usage from the response.
# Includes prompt, completion, and total tokens used.
_keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
system_fingerprint: Optional[str] = None
for _prompts in sub_prompts:
if self.streaming:
if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.")
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
choices.append(
{
"text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason")
if generation.generation_info
else None,
"logprobs": generation.generation_info.get("logprobs")
if generation.generation_info
else None,
}
)
else:
response = await acompletion_with_retry(
self, prompt=_prompts, run_manager=run_manager, **params
)
if not isinstance(response, dict):
response = response.dict()
choices.extend(response["choices"])
update_token_usage(_keys, response, token_usage)
return self.create_llm_result(
choices,
prompts,
params,
token_usage,
system_fingerprint=system_fingerprint,
)
def get_sub_prompts(
self,
params: Dict[str, Any],
prompts: List[str],
stop: Optional[List[str]] = None,
) -> List[List[str]]:
"""Get the sub prompts for llm call."""
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params["max_tokens"] == -1:
if len(prompts) != 1:
raise ValueError(
"max_tokens set to -1 not supported for multiple inputs."
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
def create_llm_result(
self,
choices: Any,
prompts: List[str],
params: Dict[str, Any],
token_usage: Dict[str, int],
*,
system_fingerprint: Optional[str] = None,
) -> LLMResult:
"""Create the LLMResult from the choices and prompts."""
generations = []
n = params.get("n", self.n)
for i, _ in enumerate(prompts):
sub_choices = choices[i * n : (i + 1) * n]
generations.append(
[
Generation(
text=choice["text"],
generation_info=dict(
finish_reason=choice.get("finish_reason"),
logprobs=choice.get("logprobs"),
),
)
for choice in sub_choices
]
)
llm_output = {"token_usage": token_usage, "model_name": self.model_name}
if system_fingerprint:
llm_output["system_fingerprint"] = system_fingerprint
return LLMResult(generations=generations, llm_output=llm_output)
@property
def _invocation_params(self) -> Dict[str, Any]:
"""Get the parameters used to invoke the model."""
openai_creds: Dict[str, Any] = {}
if not is_openai_v1():
openai_creds.update(
{
"api_key": self.openai_api_key,
"api_base": self.openai_api_base,
"organization": self.openai_organization,
}
)
if self.openai_proxy:
import openai
openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_num_tokens(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
model_name = self.tiktoken_model_name or self.model_name
try:
enc = tiktoken.encoding_for_model(model_name)
except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.")
model = "cl100k_base"
enc = tiktoken.get_encoding(model)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
@staticmethod
def modelname_to_contextsize(modelname: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a model.
Args:
modelname: The modelname we want to know the context size for.
Returns:
The maximum context size
Example:
.. code-block:: python
max_tokens = openai.modelname_to_contextsize("text-davinci-003")
"""
model_token_mapping = {
"gpt-4": 8192,
"gpt-4-0314": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-32k-0314": 32768,
"gpt-4-32k-0613": 32768,
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0301": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16385,
"gpt-3.5-turbo-16k-0613": 16385,
"gpt-3.5-turbo-instruct": 4096,
"text-ada-001": 2049,
"ada": 2049,
"text-babbage-001": 2040,
"babbage": 2049,
"text-curie-001": 2049,
"curie": 2049,
"davinci": 2049,
"text-davinci-003": 4097,
"text-davinci-002": 4097,
"code-davinci-002": 8001,
"code-davinci-001": 8001,
"code-cushman-002": 2048,
"code-cushman-001": 2048,
}
# handling finetuned models
if "ft-" in modelname:
modelname = modelname.split(":")[0]
context_size = model_token_mapping.get(modelname, None)
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
)
return context_size
@property
def max_context_size(self) -> int:
"""Get max context size for this model."""
return self.modelname_to_contextsize(self.model_name)
def max_tokens_for_prompt(self, prompt: str) -> int:
"""Calculate the maximum number of tokens possible to generate for a prompt.
Args:
prompt: The prompt to pass into the model.
Returns:
The maximum number of tokens to generate for a prompt.
Example:
.. code-block:: python
max_tokens = openai.max_token_for_prompt("Tell me a joke.")
"""
num_tokens = self.get_num_tokens(prompt)
return self.max_context_size - num_tokens
class OpenAI(BaseOpenAI):
"""OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAI
openai = OpenAI(model_name="text-davinci-003")
"""
@property
def _invocation_params(self) -> Dict[str, Any]:
return {**{"model": self.model_name}, **super()._invocation_params}
class AzureOpenAI(BaseOpenAI):
"""Azure-specific OpenAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import AzureOpenAI
openai = AzureOpenAI(model_name="text-davinci-003")
"""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment_name: Union[str, None] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_version: str = Field(default="", alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
""" # noqa: E501
azure_ad_token_provider: Union[str, None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
openai_api_type: str = ""
"""Legacy, for openai<1.0.0 support."""
validate_base_url: bool = True
"""For backwards compatibility. If legacy val openai_api_base is passed in, try to
infer if it is a base_url or azure_endpoint and update accordingly.
"""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["streaming"] and values["n"] > 1:
raise ValueError("Cannot stream results when n > 1.")
if values["streaming"] and values["best_of"] > 1:
raise ValueError("Cannot stream results when best_of > 1.")
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] = (
values["openai_api_base"].rstrip("/") + "/openai"
)
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
if values["deployment_name"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment_name` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment_name` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment_name"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` "
"(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment_name"]
)
values["deployment_name"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment_name"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).completions
values["async_client"] = openai.AsyncAzureOpenAI(
**client_params
).completions
else:
values["client"] = openai.Completion
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {
**{"deployment_name": self.deployment_name},
**super()._identifying_params,
}
@property
def _invocation_params(self) -> Dict[str, Any]:
if is_openai_v1():
openai_params = {"model": self.deployment_name}
else:
openai_params = {
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
return {**openai_params, **super()._invocation_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "azure"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
class OpenAIChat(BaseLLM):
"""OpenAI Chat large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import OpenAIChat
openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
"""
client: Any = Field(default=None, exclude=True) #: :meta private:
async_client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = "gpt-3.5-turbo"
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
# When updating this to use a SecretStr
# Check for classes that derive from this class (as some of them
# may assume openai_api_key is a str)
openai_api_key: Optional[str] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `OPENAI_API_KEY` if not provided."""
openai_api_base: Optional[str] = Field(default=None, alias="base_url")
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator."""
# to support explicit proxy for OpenAI
openai_proxy: Optional[str] = None
max_retries: int = 6
"""Maximum number of retries to make when generating."""
prefix_messages: List = Field(default_factory=list)
"""Series of messages for Chat input."""
streaming: bool = False
"""Whether to stream the results or not."""
allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
"""Set of special tokens that are allowed。"""
disallowed_special: Union[Literal["all"], Collection[str]] = "all"
"""Set of special tokens that are not allowed。"""
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_api_base = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
openai_proxy = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
openai_organization = get_from_dict_or_env(
values, "openai_organization", "OPENAI_ORGANIZATION", default=""
)
try:
import openai
openai.api_key = openai_api_key
if openai_api_base:
openai.api_base = openai_api_base
if openai_organization:
openai.organization = openai_organization
if openai_proxy:
openai.proxy = {"http": openai_proxy, "https": openai_proxy} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
warnings.warn(
"You are trying to use a chat model. This way of initializing it is "
"no longer supported. Instead, please use: "
"`from langchain.chat_models import ChatOpenAI`"
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return self.model_kwargs
def _get_chat_params(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> Tuple:
if len(prompts) > 1:
raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}"
)
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit
del params["max_tokens"]
return messages, params
def _stream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
for stream_resp in completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
run_manager.on_llm_new_token(token, chunk=chunk)
async def _astream(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
messages, params = self._get_chat_params([prompt], stop)
params = {**params, **kwargs, "stream": True}
async for stream_resp in await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
):
if not isinstance(stream_resp, dict):
stream_resp = stream_resp.dict()
token = stream_resp["choices"][0]["delta"].get("content", "")
chunk = GenerationChunk(text=token)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(token, chunk=chunk)
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
for chunk in self._stream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = completion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
if self.streaming:
generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
if generation is None:
generation = chunk
else:
generation += chunk
assert generation is not None
return LLMResult(generations=[[generation]])
messages, params = self._get_chat_params(prompts, stop)
params = {**params, **kwargs}
full_response = await acompletion_with_retry(
self, messages=messages, run_manager=run_manager, **params
)
if not isinstance(full_response, dict):
full_response = full_response.dict()
llm_output = {
"token_usage": full_response["usage"],
"model_name": self.model_name,
}
return LLMResult(
generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])]
],
llm_output=llm_output,
)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "openai-chat"
def get_token_ids(self, text: str) -> List[int]:
"""Get the token IDs using the tiktoken package."""
# tiktoken NOT supported for Python < 3.8
if sys.version_info[1] < 8:
return super().get_token_ids(text)
try:
import tiktoken
except ImportError:
raise ImportError(
"Could not import tiktoken python package. "
"This is needed in order to calculate get_num_tokens. "
"Please install it with `pip install tiktoken`."
)
enc = tiktoken.encoding_for_model(self.model_name)
return enc.encode(
text,
allowed_special=self.allowed_special,
disallowed_special=self.disallowed_special,
)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,175 | ModuleNotFoundError: No module named 'clarifai.auth' | ### System Info
platform: Vagrant - Ubuntu 2204
python: 3.9.18
langchain version: 0.0.344
langchain core: 0.0.8
clarifai: 9.10.4
### Who can help?
@hwchase17 @agola11
### Information
- [X] The official example notebooks/scripts
- [x] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
1. Install the latest version of clarifai (9.10.4)
2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai
``` bash
Could not import clarifai python package. Please install it with `pip install clarifai`.
File 'clarifai.py', line 77, in validate_environment:
raise ImportError( Traceback (most recent call last):
File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment
from clarifai.auth.helper import ClarifaiAuthHelper
ModuleNotFoundError: No module named 'clarifai.auth'
```
### Expected behavior
I expect **ClarifaiAuthHelper** to import correctly.
In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way:
``` python
from clarifai.client.auth.helper import ClarifaiAuthHelper
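
# A version-tolerant import sketch (my assumption, not current langchain code):
# try the new location first and fall back to the pre-9.10 path that
# langchain/embeddings/clarifai.py uses today.
try:
    from clarifai.client.auth.helper import ClarifaiAuthHelper  # clarifai >= 9.10
except ImportError:
    from clarifai.auth.helper import ClarifaiAuthHelper  # older clarifai releases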
``` | https://github.com/langchain-ai/langchain/issues/14175 | https://github.com/langchain-ai/langchain/pull/14215 | ca8a022cd937ba398bb5544f4428f6ceafe56b84 | 8504ec56e4fc25308ba5baa4beaca944d9ff3371 | "2023-12-02T15:28:09Z" | python | "2023-12-04T19:53:34Z" | libs/langchain/langchain/embeddings/clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ClarifaiEmbeddings(BaseModel, Embeddings):
"""Clarifai embedding models.
To use, you should have the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import ClarifaiEmbeddings
clarifai = ClarifaiEmbeddings(
model="embed-english-light-v3.0", clarifai_api_key="my-api-key"
)
"""
stub: Any #: :meta private:
"""Clarifai stub."""
userDataObject: Any
"""Clarifai user data object."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
"""Clarifai personal access token to use."""
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
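        # Embed the texts in fixed-size batches; each batch becomes a single PostModelOutputs request.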
batch_size = 32
embeddings = []
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=t))
)
for t in batch
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings.extend(
[
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Clarifai's embedding models.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=text))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
                if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings = [
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
return embeddings[0]
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,175 | ModuleNotFoundError: No module named 'clarifai.auth' | ### System Info
platform: Vagrant - Ubuntu 2204
python: 3.9.18
langchain version: 0.0.344
langchain core: 0.0.8
clarifai: 9.10.4
### Who can help?
@hwchase17 @agola11
### Information
- [X] The official example notebooks/scripts
- [x] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
1. Install the latest version of clarifai (9.10.4)
2. Run the example: https://python.langchain.com/docs/integrations/llms/clarifai
``` bash
Could not import clarifai python package. Please install it with `pip install clarifai`.
File 'clarifai.py', line 77, in validate_environment:
raise ImportError( Traceback (most recent call last):
File "/home/vagrant/.virtualenvs/env/lib/python3.9/site-packages/langchain/llms/clarifai.py", line 74, in validate_environment
from clarifai.auth.helper import ClarifaiAuthHelper
ModuleNotFoundError: No module named 'clarifai.auth'
```
### Expected behavior
I expect **ClarifaiAuthHelper** to import correctly.
In the latest version of clarifai **ClarifaiAuthHelper** is imported in this way:
``` python
from clarifai.client.auth.helper import ClarifaiAuthHelper
``` | https://github.com/langchain-ai/langchain/issues/14175 | https://github.com/langchain-ai/langchain/pull/14215 | ca8a022cd937ba398bb5544f4428f6ceafe56b84 | 8504ec56e4fc25308ba5baa4beaca944d9ff3371 | "2023-12-02T15:28:09Z" | python | "2023-12-04T19:53:34Z" | libs/langchain/langchain/llms/clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain_core.outputs import Generation, LLMResult
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class Clarifai(LLM):
"""Clarifai large language models.
To use, you should have an account on the Clarifai platform,
the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your PAT key,
or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Clarifai
clarifai_llm = Clarifai(pat=CLARIFAI_PAT, \
user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
"""
stub: Any #: :meta private:
userDataObject: Any
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Clarifai API."""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{
"user_id": self.user_id,
"app_id": self.app_id,
"model_id": self.model_id,
}
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "clarifai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Clarfai's PostModelOutputs endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = clarifai_llm("Tell me a joke.")
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
# The userDataObject is created in the overview and
# is required when using a PAT
# If version_id None, Defaults to the latest model version
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_model_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_model_failure}"
)
text = post_model_outputs_response.outputs[0].data.text.raw
# In order to make this consistent with other endpoints, we strip them.
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
# TODO: add caching here.
generations = []
batch_size = 32
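        # Send the prompts in batches of 32, issuing one PostModelOutputs request per batch.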
for i in range(0, len(prompts), batch_size):
batch = prompts[i : i + batch_size]
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=prompt))
)
for prompt in batch
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_model_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_model_failure}"
)
for output in post_model_outputs_response.outputs:
if stop is not None:
text = enforce_stop_tokens(output.data.text.raw, stop)
else:
text = output.data.text.raw
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,159 | create_tagging_chain_pydantic() pydantic schema validation errors | ### System Info
[pydantic_chain_test_code.txt](https://github.com/langchain-ai/langchain/files/13532796/pydantic_chain_test_code.txt)
[error_log.txt](https://github.com/langchain-ai/langchain/files/13532799/error_log.txt)
### Who can help?
@agola11 @hwchase17
The attached code is adapted from the official example at https://python.langchain.com/docs/use_cases/tagging. I am using the AzureChatOpenAI API, but I don't think the cause is related to that. I added an example prompt to confirm that the llm itself is working properly.
It appears that even when the pydantic_schema argument is a confirmed subclass of BaseModel, create_tagging_chain_pydantic(Tags, llm) triggers validation errors. Please see the attached test code and error log.
### Information
- [X] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [X] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
Run the attached code and compare it with the attached error log, which shows the version numbers of Python, LangChain, and Pydantic. I am running on Windows 11:
Python version: 3.11.5 | packaged by Anaconda, Inc. | (main, Sep 11 2023, 13:26:23) [MSC v.1916 64 bit (AMD64)]
Langchain version: 0.0.344
Pydantic version: 2.5.2
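A minimal sketch of the reproduction (following the docs example; the attached script uses AzureChatOpenAI, and the model settings here are placeholders):
``` python
from langchain.chains import create_tagging_chain_pydantic
from langchain.chat_models import ChatOpenAI
from pydantic import BaseModel, Field  # pydantic 2.5.2


class Tags(BaseModel):
    sentiment: str = Field(..., enum=["happy", "neutral", "sad"])
    aggressiveness: int = Field(..., enum=[1, 2, 3, 4, 5])
    language: str = Field(..., enum=["spanish", "english", "french", "german", "italian"])


llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
chain = create_tagging_chain_pydantic(Tags, llm)  # raises the pydantic validation errors
```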
### Expected behavior
Expected behavior is as shown in the last example on https://python.langchain.com/docs/use_cases/tagging
Tags(sentiment='sad', aggressiveness=5, language='spanish') | https://github.com/langchain-ai/langchain/issues/14159 | https://github.com/langchain-ai/langchain/pull/14165 | 641e401ba857e7f6f895b54fbb02c0560d2283a5 | 702a6d7044b01b82730e32dd61c83fd99c6272ab | "2023-12-02T00:35:53Z" | python | "2023-12-04T20:06:04Z" | docs/docs/use_cases/tagging.ipynb | {
"cells": [
{
"cell_type": "raw",
"id": "cb6f552e-775f-4d84-bc7c-dca94c06a33c",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 1\n",
"title: Tagging\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "a0507a4b",
"metadata": {},
"source": [
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/tagging.ipynb)\n",
"\n",
"## Use case\n",
"\n",
"Tagging means labeling a document with classes such as:\n",
"\n",
"- sentiment\n",
"- language\n",
"- style (formal, informal etc.)\n",
"- covered topics\n",
"- political tendency\n",
"\n",
"![Image description](/img/tagging.png)\n",
"\n",
"## Overview\n",
"\n",
"Tagging has a few components:\n",
"\n",
"* `function`: Like [extraction](/docs/use_cases/extraction), tagging uses [functions](https://openai.com/blog/function-calling-and-other-api-updates) to specify how the model should tag a document\n",
"* `schema`: defines how we want to tag the document\n",
"\n",
"## Quickstart\n",
"\n",
"Let's see a very straightforward example of how we can use OpenAI functions for tagging in LangChain."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc5cbb6f",
"metadata": {},
"outputs": [],
"source": [
"!pip install langchain openai\n",
"\n",
"# Set env var OPENAI_API_KEY or load from a .env file:\n",
"# import dotenv\n",
"# dotenv.load_dotenv()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "bafb496a",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic\n",
"from langchain.chat_models import ChatOpenAI"
]
},
{
"cell_type": "markdown",
"id": "b8ca3f93",
"metadata": {},
"source": [
"We specify a few properties with their expected type in our schema."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "39f3ce3e",
"metadata": {},
"outputs": [],
"source": [
"# Schema\n",
"schema = {\n",
" \"properties\": {\n",
" \"sentiment\": {\"type\": \"string\"},\n",
" \"aggressiveness\": {\"type\": \"integer\"},\n",
" \"language\": {\"type\": \"string\"},\n",
" }\n",
"}\n",
"\n",
"# LLM\n",
"llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0613\")\n",
"chain = create_tagging_chain(schema, llm)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "5509b6a6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'sentiment': 'positive', 'language': 'Spanish'}"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
"chain.run(inp)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "9154474c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'sentiment': 'enojado', 'aggressiveness': 1, 'language': 'es'}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
"chain.run(inp)"
]
},
{
"cell_type": "markdown",
"id": "d921bb53",
"metadata": {},
"source": [
"As we can see in the examples, it correctly interprets what we want.\n",
"\n",
"The results vary so that we get, for example, sentiments in different languages ('positive', 'enojado' etc.).\n",
"\n",
"We will see how to control these results in the next section."
]
},
{
"cell_type": "markdown",
"id": "bebb2f83",
"metadata": {},
"source": [
"## Finer control\n",
"\n",
"Careful schema definition gives us more control over the model's output. \n",
"\n",
"Specifically, we can define:\n",
"\n",
"- possible values for each property\n",
"- description to make sure that the model understands the property\n",
"- required properties to be returned"
]
},
{
"cell_type": "markdown",
"id": "69ef0b9a",
"metadata": {},
"source": [
"Here is an example of how we can use `_enum_`, `_description_`, and `_required_` to control for each of the previously mentioned aspects:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "6a5f7961",
"metadata": {},
"outputs": [],
"source": [
"schema = {\n",
" \"properties\": {\n",
" \"aggressiveness\": {\n",
" \"type\": \"integer\",\n",
" \"enum\": [1, 2, 3, 4, 5],\n",
" \"description\": \"describes how aggressive the statement is, the higher the number the more aggressive\",\n",
" },\n",
" \"language\": {\n",
" \"type\": \"string\",\n",
" \"enum\": [\"spanish\", \"english\", \"french\", \"german\", \"italian\"],\n",
" },\n",
" },\n",
" \"required\": [\"language\", \"sentiment\", \"aggressiveness\"],\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "e5a5881f",
"metadata": {},
"outputs": [],
"source": [
"chain = create_tagging_chain(schema, llm)"
]
},
{
"cell_type": "markdown",
"id": "5ded2332",
"metadata": {},
"source": [
"Now the answers are much better!"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "d9b9d53d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'aggressiveness': 0, 'language': 'spanish'}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inp = \"Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!\"\n",
"chain.run(inp)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1c12fa00",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'aggressiveness': 5, 'language': 'spanish'}"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
"chain.run(inp)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "0bdfcb05",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'aggressiveness': 0, 'language': 'english'}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"inp = \"Weather is ok here, I can go outside without much more than a coat\"\n",
"chain.run(inp)"
]
},
{
"cell_type": "markdown",
"id": "cf6b7389",
"metadata": {},
"source": [
"The [LangSmith trace](https://smith.langchain.com/public/311e663a-bbe8-4053-843e-5735055c032d/r) lets us peek under the hood:\n",
"\n",
"* As with [extraction](/docs/use_cases/extraction), we call the `information_extraction` function [here](https://github.com/langchain-ai/langchain/blob/269f85b7b7ffd74b38cd422d4164fc033388c3d0/libs/langchain/langchain/chains/openai_functions/extraction.py#L20) on the input string.\n",
"* This OpenAI function extraction information based upon the provided schema.\n",
"\n",
"![Image description](/img/tagging_trace.png)"
]
},
{
"cell_type": "markdown",
"id": "e68ad17e",
"metadata": {},
"source": [
"## Pydantic"
]
},
{
"cell_type": "markdown",
"id": "2f5970ec",
"metadata": {},
"source": [
"We can also use a Pydantic schema to specify the required properties and types. \n",
"\n",
"We can also send other arguments, such as `enum` or `description`, to each field.\n",
"\n",
"This lets us specify our schema in the same manner that we would a new class or function in Python with purely Pythonic types."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "bf1f367e",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel, Field"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "83a2e826",
"metadata": {},
"outputs": [],
"source": [
"class Tags(BaseModel):\n",
" sentiment: str = Field(..., enum=[\"happy\", \"neutral\", \"sad\"])\n",
" aggressiveness: int = Field(\n",
" ...,\n",
" description=\"describes how aggressive the statement is, the higher the number the more aggressive\",\n",
" enum=[1, 2, 3, 4, 5],\n",
" )\n",
" language: str = Field(\n",
" ..., enum=[\"spanish\", \"english\", \"french\", \"german\", \"italian\"]\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "6e404892",
"metadata": {},
"outputs": [],
"source": [
"chain = create_tagging_chain_pydantic(Tags, llm)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "b5fc43c4",
"metadata": {},
"outputs": [],
"source": [
"inp = \"Estoy muy enojado con vos! Te voy a dar tu merecido!\"\n",
"res = chain.run(inp)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "5074bcc3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Tags(sentiment='sad', aggressiveness=5, language='spanish')"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res"
]
},
{
"cell_type": "markdown",
"id": "29346d09",
"metadata": {},
"source": [
"### Going deeper\n",
"\n",
"* You can use the [metadata tagger](https://python.langchain.com/docs/integrations/document_transformers/openai_metadata_tagger) document transformer to extract metadata from a LangChain `Document`. \n",
"* This covers the same basic functionality as the tagging chain, only applied to a LangChain `Document`."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 11,737 | `extra_tools` argument in `create_pandas_dataframe_agent()` doesn't seem to be working | ### System Info
Platform: Windows Server 2022
Python: 3.11.6
Langchain version: 0.0.306
### Who can help?
@agola11 @hwchase17
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [X] Agents / Agent Executors
- [X] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
### Creating a test tool
```py
from langchain.agents import Tool
tools = [
Tool(
name="test_tool",
func=print,
description="This is a test tool"
)
]
tools
```
```
[Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)]
```
### Setting up the pandas_dataframe_agent
```py
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import HuggingFacePipeline
import pandas as pd
llm = HuggingFacePipeline.from_model_id(
model_id="google/flan-t5-small", task="text2text-generation", device=0
)
agent = create_pandas_dataframe_agent(llm, pd.DataFrame(), verbose=True, extra_tools=tools)
agent.tools
```
```
[PythonAstREPLTool(name='python_repl_ast', description='A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.', args_schema=<class 'langchain.tools.python.tool.PythonInputs'>, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, globals={}, locals={'df': Empty DataFrame
Columns: []
Index: []}, sanitize_input=True),
Tool(name='test_tool', description='This is a test tool', args_schema=None, return_direct=False, verbose=False, callbacks=None, callback_manager=None, tags=None, metadata=None, handle_tool_error=False, func=<built-in function print>, coroutine=None)]
```
### Executing agent with debugging enabled
```py
import langchain
langchain.debug = True
agent.run('What is 2+2?')
```
```
[chain/start] [1:chain:AgentExecutor] Entering Chain run with input:
{
"input": "What is 2+2?"
}
[chain/start] [1:chain:AgentExecutor > 2:chain:LLMChain] Entering Chain run with input:
{
"input": "What is 2+2?",
"agent_scratchpad": "",
"stop": [
"\nObservation:",
"\n\tObservation:"
]
}
[llm/start] [1:chain:AgentExecutor > 2:chain:LLMChain > 3:llm:HuggingFacePipeline] Entering LLM run with input:
{
"prompts": [
"You are working with a pandas dataframe in Python. The name of the dataframe is `df`.\nYou should use the tools below to answer the question posed of you:\n\npython_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [python_repl_ast]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\n\nThis is the result of `print(df.head())`:\n\n\nBegin!\nQuestion: What is 2+2?"
]
}
```
### The prompt from the above log
```
You are working with a pandas dataframe in Python. The name of the dataframe is `df`.
You should use the tools below to answer the question posed of you:
python_repl_ast: A Python shell. Use this to execute python commands. Input should be a valid python command. When using this tool, sometimes output is abbreviated - make sure it does not look abbreviated before using it in your answer.
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [python_repl_ast]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
This is the result of `print(df.head())`:
Begin!
Question: What is 2+2?
```
### Expected behavior
Where did my custom tool `test_tool` disappear? I expected it to show up after python_repl_ast? | https://github.com/langchain-ai/langchain/issues/11737 | https://github.com/langchain-ai/langchain/pull/13203 | 77a15fa9888a3e81a014895a6ec3f1b34c016d06 | f758c8adc43ebbbdb3a13caa5a022a2d043229cc | "2023-10-12T22:22:09Z" | python | "2023-12-05T04:54:08Z" | libs/experimental/langchain_experimental/agents/agent_toolkits/pandas/base.py | """Agent for working with pandas objects."""
from typing import Any, Dict, List, Optional, Sequence, Tuple
from langchain.agents.agent import AgentExecutor, BaseSingleActionAgent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.agents.types import AgentType
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.llm import LLMChain
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import SystemMessage
from langchain.tools import BaseTool
from langchain_experimental.agents.agent_toolkits.pandas.prompt import (
FUNCTIONS_WITH_DF,
FUNCTIONS_WITH_MULTI_DF,
MULTI_DF_PREFIX,
MULTI_DF_PREFIX_FUNCTIONS,
PREFIX,
PREFIX_FUNCTIONS,
SUFFIX_NO_DF,
SUFFIX_WITH_DF,
SUFFIX_WITH_MULTI_DF,
)
from langchain_experimental.tools.python.tool import PythonAstREPLTool
def _get_multi_prompt(
dfs: List[Any],
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
num_dfs = len(dfs)
if suffix is not None:
suffix_to_use = suffix
include_dfs_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_MULTI_DF
include_dfs_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_dfs_head = False
if input_variables is None:
input_variables = ["input", "agent_scratchpad", "num_dfs"]
if include_dfs_head:
input_variables += ["dfs_head"]
if prefix is None:
prefix = MULTI_DF_PREFIX
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f"df{i + 1}"] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)]
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables
)
partial_prompt = prompt.partial()
if "dfs_head" in input_variables:
dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs])
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs), dfs_head=dfs_head)
if "num_dfs" in input_variables:
partial_prompt = partial_prompt.partial(num_dfs=str(num_dfs))
return partial_prompt, tools
def _get_single_prompt(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
include_df_head = True
elif include_df_in_prompt:
suffix_to_use = SUFFIX_WITH_DF
include_df_head = True
else:
suffix_to_use = SUFFIX_NO_DF
include_df_head = False
if input_variables is None:
input_variables = ["input", "agent_scratchpad"]
if include_df_head:
input_variables += ["df_head"]
if prefix is None:
prefix = PREFIX
tools = [PythonAstREPLTool(locals={"df": df})]
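    # The agent prompt is rendered from this tool list, so only tools included here are described in it.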
prompt = ZeroShotAgent.create_prompt(
tools, prefix=prefix, suffix=suffix_to_use, input_variables=input_variables
)
partial_prompt = prompt.partial()
if "df_head" in input_variables:
partial_prompt = partial_prompt.partial(
df_head=str(df.head(number_of_head_rows).to_markdown())
)
return partial_prompt, tools
def _get_prompt_and_tools(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
try:
import pandas as pd
pd.set_option("display.max_columns", None)
except ImportError:
raise ImportError(
"pandas package not found, please install with `pip install pandas`"
)
if include_df_in_prompt is not None and suffix is not None:
raise ValueError("If suffix is specified, include_df_in_prompt should not be.")
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_multi_prompt(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_single_prompt(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
def _get_functions_single_prompt(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
suffix_to_use = suffix_to_use.format(
df_head=str(df.head(number_of_head_rows).to_markdown())
)
elif include_df_in_prompt:
suffix_to_use = FUNCTIONS_WITH_DF.format(
df_head=str(df.head(number_of_head_rows).to_markdown())
)
else:
suffix_to_use = ""
if prefix is None:
prefix = PREFIX_FUNCTIONS
tools = [PythonAstREPLTool(locals={"df": df})]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
def _get_functions_multi_prompt(
dfs: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
if suffix is not None:
suffix_to_use = suffix
if include_df_in_prompt:
dfs_head = "\n\n".join(
[d.head(number_of_head_rows).to_markdown() for d in dfs]
)
suffix_to_use = suffix_to_use.format(
dfs_head=dfs_head,
)
elif include_df_in_prompt:
dfs_head = "\n\n".join([d.head(number_of_head_rows).to_markdown() for d in dfs])
suffix_to_use = FUNCTIONS_WITH_MULTI_DF.format(
dfs_head=dfs_head,
)
else:
suffix_to_use = ""
if prefix is None:
prefix = MULTI_DF_PREFIX_FUNCTIONS
prefix = prefix.format(num_dfs=str(len(dfs)))
df_locals = {}
for i, dataframe in enumerate(dfs):
df_locals[f"df{i + 1}"] = dataframe
tools = [PythonAstREPLTool(locals=df_locals)]
system_message = SystemMessage(content=prefix + suffix_to_use)
prompt = OpenAIFunctionsAgent.create_prompt(system_message=system_message)
return prompt, tools
def _get_functions_prompt_and_tools(
df: Any,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
) -> Tuple[BasePromptTemplate, List[PythonAstREPLTool]]:
try:
import pandas as pd
pd.set_option("display.max_columns", None)
except ImportError:
raise ImportError(
"pandas package not found, please install with `pip install pandas`"
)
if input_variables is not None:
raise ValueError("`input_variables` is not supported at the moment.")
if include_df_in_prompt is not None and suffix is not None:
raise ValueError("If suffix is specified, include_df_in_prompt should not be.")
if isinstance(df, list):
for item in df:
if not isinstance(item, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_functions_multi_prompt(
df,
prefix=prefix,
suffix=suffix,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
else:
if not isinstance(df, pd.DataFrame):
raise ValueError(f"Expected pandas object, got {type(df)}")
return _get_functions_single_prompt(
df,
prefix=prefix,
suffix=suffix,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
def create_pandas_dataframe_agent(
llm: BaseLanguageModel,
df: Any,
agent_type: AgentType = AgentType.ZERO_SHOT_REACT_DESCRIPTION,
callback_manager: Optional[BaseCallbackManager] = None,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
input_variables: Optional[List[str]] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
max_iterations: Optional[int] = 15,
max_execution_time: Optional[float] = None,
early_stopping_method: str = "force",
agent_executor_kwargs: Optional[Dict[str, Any]] = None,
include_df_in_prompt: Optional[bool] = True,
number_of_head_rows: int = 5,
extra_tools: Sequence[BaseTool] = (),
**kwargs: Dict[str, Any],
) -> AgentExecutor:
"""Construct a pandas agent from an LLM and dataframe."""
agent: BaseSingleActionAgent
if agent_type == AgentType.ZERO_SHOT_REACT_DESCRIPTION:
prompt, base_tools = _get_prompt_and_tools(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
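        # Append any user-supplied extra tools to the default Python REPL tool.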
tools = base_tools + list(extra_tools)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
agent = ZeroShotAgent(
llm_chain=llm_chain,
allowed_tools=tool_names,
callback_manager=callback_manager,
**kwargs,
)
elif agent_type == AgentType.OPENAI_FUNCTIONS:
_prompt, base_tools = _get_functions_prompt_and_tools(
df,
prefix=prefix,
suffix=suffix,
input_variables=input_variables,
include_df_in_prompt=include_df_in_prompt,
number_of_head_rows=number_of_head_rows,
)
tools = base_tools + list(extra_tools)
agent = OpenAIFunctionsAgent(
llm=llm,
prompt=_prompt,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
else:
raise ValueError(f"Agent type {agent_type} not supported at the moment.")
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
callback_manager=callback_manager,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
max_iterations=max_iterations,
max_execution_time=max_execution_time,
early_stopping_method=early_stopping_method,
**(agent_executor_kwargs or {}),
)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | docs/docs/modules/data_connection/retrievers/multi_vector.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "d9172545",
"metadata": {},
"source": [
"# MultiVector Retriever\n",
"\n",
"It can often be beneficial to store multiple vectors per document. There are multiple use cases where this is beneficial. LangChain has a base `MultiVectorRetriever` which makes querying this type of setup easy. A lot of the complexity lies in how to create the multiple vectors per document. This notebook covers some of the common ways to create those vectors and use the `MultiVectorRetriever`.\n",
"\n",
"The methods to create multiple vectors per document include:\n",
"\n",
"- Smaller chunks: split a document into smaller chunks, and embed those (this is ParentDocumentRetriever).\n",
"- Summary: create a summary for each document, embed that along with (or instead of) the document.\n",
"- Hypothetical questions: create hypothetical questions that each document would be appropriate to answer, embed those along with (or instead of) the document.\n",
"\n",
"\n",
"Note that this also enables another method of adding embeddings - manually. This is great because you can explicitly add questions or queries that should lead to a document being recovered, giving you more control."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "eed469be",
"metadata": {},
"outputs": [],
"source": [
"from langchain.retrievers.multi_vector import MultiVectorRetriever"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "18c1421a",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import Chroma"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "6d869496",
"metadata": {},
"outputs": [],
"source": [
"loaders = [\n",
" TextLoader(\"../../paul_graham_essay.txt\"),\n",
" TextLoader(\"../../state_of_the_union.txt\"),\n",
"]\n",
"docs = []\n",
"for l in loaders:\n",
" docs.extend(l.load())\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000)\n",
"docs = text_splitter.split_documents(docs)"
]
},
{
"cell_type": "markdown",
"id": "fa17beda",
"metadata": {},
"source": [
"## Smaller chunks\n",
"\n",
"Often times it can be useful to retrieve larger chunks of information, but embed smaller chunks. This allows for embeddings to capture the semantic meaning as closely as possible, but for as much context as possible to be passed downstream. Note that this is what the `ParentDocumentRetriever` does. Here we show what is going on under the hood."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "0e7b6b45",
"metadata": {},
"outputs": [],
"source": [
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(\n",
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
" vectorstore=vectorstore,\n",
" base_store=store,\n",
" id_key=id_key,\n",
")\n",
"import uuid\n",
"\n",
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "72a36491",
"metadata": {},
"outputs": [],
"source": [
"# The splitter to use to create smaller chunks\n",
"child_text_splitter = RecursiveCharacterTextSplitter(chunk_size=400)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "5d23247d",
"metadata": {},
"outputs": [],
"source": [
"sub_docs = []\n",
"for i, doc in enumerate(docs):\n",
" _id = doc_ids[i]\n",
" _sub_docs = child_text_splitter.split_documents([doc])\n",
" for _doc in _sub_docs:\n",
" _doc.metadata[id_key] = _id\n",
" sub_docs.extend(_sub_docs)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "92ed5861",
"metadata": {},
"outputs": [],
"source": [
"retriever.vectorstore.add_documents(sub_docs)\n",
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "8afed60c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.', metadata={'doc_id': '59899493-92a0-41cb-b6ba-a854730ad74a', 'source': '../../state_of_the_union.txt'})"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Vectorstore alone retrieves the small chunks\n",
"retriever.vectorstore.similarity_search(\"justice breyer\")[0]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "3c9017f1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"9875"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Retriever returns larger chunks\n",
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "cdef8339-f9fa-4b3b-955f-ad9dbdf2734f",
"metadata": {},
"source": [
"The default search type the retriever performs on the vector database is a similarity search. LangChain Vector Stores also support searching via [Max Marginal Relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search) so if you want this instead you can just set the `search_type` property as follows:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "36739460-a737-4a8e-b70f-50bf8c8eaae7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"9875"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain.retrievers.multi_vector import SearchType\n",
"\n",
"retriever.search_type = SearchType.mmr\n",
"\n",
"len(retriever.get_relevant_documents(\"justice breyer\")[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "d6a7ae0d",
"metadata": {},
"source": [
"## Summary\n",
"\n",
"Oftentimes a summary may be able to distill more accurately what a chunk is about, leading to better retrieval. Here we show how to create summaries, and then embed those."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "1433dff4",
"metadata": {},
"outputs": [],
"source": [
"import uuid\n",
"\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.prompts import ChatPromptTemplate\n",
"from langchain.schema.document import Document\n",
"from langchain.schema.output_parser import StrOutputParser"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "35b30390",
"metadata": {},
"outputs": [],
"source": [
"chain = (\n",
" {\"doc\": lambda x: x.page_content}\n",
" | ChatPromptTemplate.from_template(\"Summarize the following document:\\n\\n{doc}\")\n",
" | ChatOpenAI(max_retries=0)\n",
" | StrOutputParser()\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "41a2a738",
"metadata": {},
"outputs": [],
"source": [
"summaries = chain.batch(docs, {\"max_concurrency\": 5})"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "7ac5e4b1",
"metadata": {},
"outputs": [],
"source": [
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
" vectorstore=vectorstore,\n",
" base_store=store,\n",
" id_key=id_key,\n",
")\n",
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "0d93309f",
"metadata": {},
"outputs": [],
"source": [
"summary_docs = [\n",
" Document(page_content=s, metadata={id_key: doc_ids[i]})\n",
" for i, s in enumerate(summaries)\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "6d5edf0d",
"metadata": {},
"outputs": [],
"source": [
"retriever.vectorstore.add_documents(summary_docs)\n",
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "862ae920",
"metadata": {},
"outputs": [],
"source": [
"# # We can also add the original chunks to the vectorstore if we so want\n",
"# for i, doc in enumerate(docs):\n",
"# doc.metadata[id_key] = doc_ids[i]\n",
"# retriever.vectorstore.add_documents(docs)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "299232d6",
"metadata": {},
"outputs": [],
"source": [
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "10e404c0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Document(page_content=\"The document is a speech given by the President of the United States. The President discusses various important issues and goals for the country, including nominating a Supreme Court Justice, securing the border and fixing the immigration system, protecting women's rights, supporting veterans, addressing the opioid epidemic, improving mental health care, and ending cancer. The President emphasizes the unity and strength of the American people and expresses optimism for the future of the nation.\", metadata={'doc_id': '8fdf4009-628c-400d-949c-1d3f4daf1e66'})"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sub_docs[0]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "e4cce5c2",
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "c8570dbb",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"9194"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(retrieved_docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "097a5396",
"metadata": {},
"source": [
"## Hypothetical Queries\n",
"\n",
"An LLM can also be used to generate a list of hypothetical questions that could be asked of a particular document. These questions can then be embedded"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "5219b085",
"metadata": {},
"outputs": [],
"source": [
"functions = [\n",
" {\n",
" \"name\": \"hypothetical_questions\",\n",
" \"description\": \"Generate hypothetical questions\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"questions\": {\n",
" \"type\": \"array\",\n",
" \"items\": {\"type\": \"string\"},\n",
" },\n",
" },\n",
" \"required\": [\"questions\"],\n",
" },\n",
" }\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "523deb92",
"metadata": {},
"outputs": [],
"source": [
"from langchain.output_parsers.openai_functions import JsonKeyOutputFunctionsParser\n",
"\n",
"chain = (\n",
" {\"doc\": lambda x: x.page_content}\n",
" # Only asking for 3 hypothetical questions, but this could be adjusted\n",
" | ChatPromptTemplate.from_template(\n",
" \"Generate a list of exactly 3 hypothetical questions that the below document could be used to answer:\\n\\n{doc}\"\n",
" )\n",
" | ChatOpenAI(max_retries=0, model=\"gpt-4\").bind(\n",
" functions=functions, function_call={\"name\": \"hypothetical_questions\"}\n",
" )\n",
" | JsonKeyOutputFunctionsParser(key_name=\"questions\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "11d30554",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[\"What were the author's initial areas of interest before college?\",\n",
" \"What was the author's experience with programming in his early years?\",\n",
" 'Why did the author switch his focus from AI to Lisp?']"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(docs[0])"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "3eb2e48c",
"metadata": {},
"outputs": [],
"source": [
"hypothetical_questions = chain.batch(docs, {\"max_concurrency\": 5})"
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "b2cd6e75",
"metadata": {},
"outputs": [],
"source": [
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(\n",
" collection_name=\"hypo-questions\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"id_key = \"doc_id\"\n",
"# The retriever (empty to start)\n",
"retriever = MultiVectorRetriever(\n",
" vectorstore=vectorstore,\n",
" base_store=store,\n",
" id_key=id_key,\n",
")\n",
"doc_ids = [str(uuid.uuid4()) for _ in docs]"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "18831b3b",
"metadata": {},
"outputs": [],
"source": [
"question_docs = []\n",
"for i, question_list in enumerate(hypothetical_questions):\n",
" question_docs.extend(\n",
" [Document(page_content=s, metadata={id_key: doc_ids[i]}) for s in question_list]\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "224b24c5",
"metadata": {},
"outputs": [],
"source": [
"retriever.vectorstore.add_documents(question_docs)\n",
"retriever.docstore.mset(list(zip(doc_ids, docs)))"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "7b442b90",
"metadata": {},
"outputs": [],
"source": [
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "089b5ad0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='What made Robert Morris advise the author to leave Y Combinator?', metadata={'doc_id': '740e484e-d67c-45f7-989d-9928aaf51c28'}),\n",
" Document(page_content=\"How did the author's mother's illness affect his decision to leave Y Combinator?\", metadata={'doc_id': '740e484e-d67c-45f7-989d-9928aaf51c28'}),\n",
" Document(page_content='What led the author to start publishing essays online?', metadata={'doc_id': '675ccee3-ce0b-4d5d-892c-b8942370babd'}),\n",
" Document(page_content='What measures are being taken to secure the border and fix the immigration system?', metadata={'doc_id': '2d51f010-969e-48a9-9e82-6b12bc7ab3d4'})]"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"sub_docs"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "7594b24e",
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "4c120c65",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"9844"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(retrieved_docs[0].page_content)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
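For completeness, a self-contained version of the snippet above — adding the imports it assumes (`Chroma` from `langchain.vectorstores` and `OpenAIEmbeddings` from `langchain.embeddings`, as used in the related notebook) — looks like this:

```python
# Self-contained reproduction sketch; the import locations are taken from the
# related notebook and may differ in other langchain releases.
from langchain.embeddings import OpenAIEmbeddings
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Splitters for the parent and child chunks
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# Vector store for the child chunks and in-memory store for the parents
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()

# On the affected release this constructor raises:
# TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```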
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | docs/docs/modules/data_connection/retrievers/parent_document_retriever.ipynb | {
"cells": [
{
"cell_type": "markdown",
"id": "34883374",
"metadata": {},
"source": [
"# Parent Document Retriever\n",
"\n",
"When splitting documents for retrieval, there are often conflicting desires:\n",
"\n",
"1. You may want to have small documents, so that their embeddings can most\n",
" accurately reflect their meaning. If too long, then the embeddings can\n",
" lose meaning.\n",
"2. You want to have long enough documents that the context of each chunk is\n",
" retained.\n",
"\n",
"The `ParentDocumentRetriever` strikes that balance by splitting and storing\n",
"small chunks of data. During retrieval, it first fetches the small chunks\n",
"but then looks up the parent ids for those chunks and returns those larger\n",
"documents.\n",
"\n",
"Note that \"parent document\" refers to the document that a small chunk\n",
"originated from. This can either be the whole raw document OR a larger\n",
"chunk."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "8b6e74b2",
"metadata": {},
"outputs": [],
"source": [
"from langchain.retrievers import ParentDocumentRetriever"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "1d17af96",
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"from langchain.embeddings import OpenAIEmbeddings\n",
"from langchain.storage import InMemoryStore\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"from langchain.vectorstores import Chroma"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "604ff981",
"metadata": {},
"outputs": [],
"source": [
"loaders = [\n",
" TextLoader(\"../../paul_graham_essay.txt\"),\n",
" TextLoader(\"../../state_of_the_union.txt\"),\n",
"]\n",
"docs = []\n",
"for l in loaders:\n",
" docs.extend(l.load())"
]
},
{
"cell_type": "markdown",
"id": "d3943f72",
"metadata": {},
"source": [
"## Retrieving full documents\n",
"\n",
"In this mode, we want to retrieve the full documents. Therefore, we only specify a child splitter."
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "1a8b2e5f",
"metadata": {},
"outputs": [],
"source": [
"# This text splitter is used to create the child documents\n",
"child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(\n",
" collection_name=\"full_documents\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()\n",
"retriever = ParentDocumentRetriever(\n",
" vectorstore=vectorstore,\n",
" docstore=store,\n",
" child_splitter=child_splitter,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "2b107935",
"metadata": {},
"outputs": [],
"source": [
"retriever.add_documents(docs, ids=None)"
]
},
{
"cell_type": "markdown",
"id": "d05b97b7",
"metadata": {},
"source": [
"This should yield two keys, because we added two documents."
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "30e3812b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['05fe8d8a-bf60-4f87-b576-4351b23df266',\n",
" '571cc9e5-9ef7-4f6c-b800-835c83a1858b']"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"list(store.yield_keys())"
]
},
{
"cell_type": "markdown",
"id": "f895d62b",
"metadata": {},
"source": [
"Let's now call the vector store search functionality - we should see that it returns small chunks (since we're storing the small chunks)."
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "b261c02c",
"metadata": {},
"outputs": [],
"source": [
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "5108222f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n"
]
}
],
"source": [
"print(sub_docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "bda8ed5a",
"metadata": {},
"source": [
"Let's now retrieve from the overall retriever. This should return large documents - since it returns the documents where the smaller chunks are located."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "419a91c4",
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "cf10d250",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"38539"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(retrieved_docs[0].page_content)"
]
},
{
"cell_type": "markdown",
"id": "14f813a5",
"metadata": {},
"source": [
"## Retrieving larger chunks\n",
"\n",
"Sometimes, the full documents can be too big to want to retrieve them as is. In that case, what we really want to do is to first split the raw documents into larger chunks, and then split it into smaller chunks. We then index the smaller chunks, but on retrieval we retrieve the larger chunks (but still not the full documents)."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "b6f9a4f0",
"metadata": {},
"outputs": [],
"source": [
"# This text splitter is used to create the parent documents\n",
"parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)\n",
"# This text splitter is used to create the child documents\n",
"# It should create documents smaller than the parent\n",
"child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)\n",
"# The vectorstore to use to index the child chunks\n",
"vectorstore = Chroma(\n",
" collection_name=\"split_parents\", embedding_function=OpenAIEmbeddings()\n",
")\n",
"# The storage layer for the parent documents\n",
"store = InMemoryStore()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "19478ff3",
"metadata": {},
"outputs": [],
"source": [
"retriever = ParentDocumentRetriever(\n",
" vectorstore=vectorstore,\n",
" docstore=store,\n",
" child_splitter=child_splitter,\n",
" parent_splitter=parent_splitter,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "fe16e620",
"metadata": {},
"outputs": [],
"source": [
"retriever.add_documents(docs)"
]
},
{
"cell_type": "markdown",
"id": "64ad3c8c",
"metadata": {},
"source": [
"We can see that there are much more than two documents now - these are the larger chunks."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "24d81886",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"66"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(list(store.yield_keys()))"
]
},
{
"cell_type": "markdown",
"id": "baaef673",
"metadata": {},
"source": [
"Let's make sure the underlying vector store still retrieves the small chunks."
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "b1c859de",
"metadata": {},
"outputs": [],
"source": [
"sub_docs = vectorstore.similarity_search(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "6fffa2eb",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court.\n"
]
}
],
"source": [
"print(sub_docs[0].page_content)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "3a3202df",
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "684fdb2c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1849"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(retrieved_docs[0].page_content)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "9f17f662",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \n",
"\n",
"We cannot let this happen. \n",
"\n",
"Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n",
"\n",
"Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n",
"\n",
"One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n",
"\n",
"And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. \n",
"\n",
"A former top litigator in private practice. A former federal public defender. And from a family of public school educators and police officers. A consensus builder. Since she’s been nominated, she’s received a broad range of support—from the Fraternal Order of Police to former judges appointed by Democrats and Republicans. \n",
"\n",
"And if we are to advance liberty and justice, we need to secure the Border and fix the immigration system. \n",
"\n",
"We can do both. At our border, we’ve installed new technology like cutting-edge scanners to better detect drug smuggling. \n",
"\n",
"We’ve set up joint patrols with Mexico and Guatemala to catch more human traffickers. \n",
"\n",
"We’re putting in place dedicated immigration judges so families fleeing persecution and violence can have their cases heard faster. \n",
"\n",
"We’re securing commitments and supporting partners in South and Central America to host more refugees and secure their own borders.\n"
]
}
],
"source": [
"print(retrieved_docs[0].page_content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "facfdacb",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | libs/langchain/langchain/retrievers/multi_vector.py | from enum import Enum
from typing import List, Optional
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
docstore: BaseStore[str, Document]
"""The storage layer for the parent documents"""
id_key: str
search_kwargs: dict
"""Keyword arguments to pass to the search function."""
search_type: SearchType
"""Type of search to perform (similarity / mmr)"""
def __init__(
self,
*,
vectorstore: VectorStore,
docstore: Optional[BaseStore[str, Document]] = None,
base_store: Optional[ByteStore] = None,
id_key: str = "doc_id",
search_kwargs: Optional[dict] = None,
search_type: SearchType = SearchType.similarity,
):
if base_store is not None:
docstore = create_kv_docstore(base_store)
elif docstore is None:
raise Exception("You must pass a `base_store` parameter.")
super().__init__(
vectorstore=vectorstore,
docstore=docstore,
id_key=id_key,
search_kwargs=search_kwargs if search_kwargs is not None else {},
search_type=search_type,
)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | libs/langchain/tests/unit_tests/indexes/test_indexing.py | from datetime import datetime
from typing import (
Any,
AsyncIterator,
Dict,
Iterable,
Iterator,
List,
Optional,
Sequence,
Type,
)
from unittest.mock import patch
import pytest
import pytest_asyncio
from langchain_core.documents import Document
from langchain_core.vectorstores import VST, VectorStore
import langchain.vectorstores
from langchain.document_loaders.base import BaseLoader
from langchain.embeddings.base import Embeddings
from langchain.indexes import aindex, index
from langchain.indexes._api import _abatch
from langchain.indexes._sql_record_manager import SQLRecordManager
class ToyLoader(BaseLoader):
"""Toy loader that always returns the same documents."""
def __init__(self, documents: Sequence[Document]) -> None:
"""Initialize with the documents to return."""
self.documents = documents
def lazy_load(
self,
) -> Iterator[Document]:
yield from self.documents
def load(self) -> List[Document]:
"""Load the documents from the source."""
return list(self.lazy_load())
async def alazy_load(
self,
) -> AsyncIterator[Document]:
async def async_generator() -> AsyncIterator[Document]:
for document in self.documents:
yield document
return async_generator()
async def aload(self) -> List[Document]:
"""Load the documents from the source."""
return [doc async for doc in await self.alazy_load()]
class InMemoryVectorStore(VectorStore):
"""In-memory implementation of VectorStore using a dictionary."""
def __init__(self) -> None:
"""Vector store interface for testing things in memory."""
self.store: Dict[str, Document] = {}
def delete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
"""Delete the given documents from the store using their IDs."""
if ids:
for _id in ids:
self.store.pop(_id, None)
async def adelete(self, ids: Optional[Sequence[str]] = None, **kwargs: Any) -> None:
"""Delete the given documents from the store using their IDs."""
if ids:
for _id in ids:
self.store.pop(_id, None)
def add_documents( # type: ignore
self,
documents: Sequence[Document],
*,
ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> None:
"""Add the given documents to the store (insert behavior)."""
if ids and len(ids) != len(documents):
raise ValueError(
f"Expected {len(ids)} ids, got {len(documents)} documents."
)
if not ids:
raise NotImplementedError("This is not implemented yet.")
for _id, document in zip(ids, documents):
if _id in self.store:
raise ValueError(
f"Document with uid {_id} already exists in the store."
)
self.store[_id] = document
async def aadd_documents(
self,
documents: Sequence[Document],
*,
ids: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> List[str]:
if ids and len(ids) != len(documents):
raise ValueError(
f"Expected {len(ids)} ids, got {len(documents)} documents."
)
if not ids:
raise NotImplementedError("This is not implemented yet.")
for _id, document in zip(ids, documents):
if _id in self.store:
raise ValueError(
f"Document with uid {_id} already exists in the store."
)
self.store[_id] = document
return list(ids)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> List[str]:
"""Add the given texts to the store (insert behavior)."""
raise NotImplementedError()
@classmethod
def from_texts(
cls: Type[VST],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> VST:
"""Create a vector store from a list of texts."""
raise NotImplementedError()
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Find the most similar documents to the given query."""
raise NotImplementedError()
@pytest.fixture
def record_manager() -> SQLRecordManager:
"""Timestamped set fixture."""
record_manager = SQLRecordManager("kittens", db_url="sqlite:///:memory:")
record_manager.create_schema()
return record_manager
@pytest_asyncio.fixture # type: ignore
@pytest.mark.requires("aiosqlite")
async def arecord_manager() -> SQLRecordManager:
"""Timestamped set fixture."""
record_manager = SQLRecordManager(
"kittens",
db_url="sqlite+aiosqlite:///:memory:",
async_mode=True,
)
await record_manager.acreate_schema()
return record_manager
@pytest.fixture
def vector_store() -> InMemoryVectorStore:
"""Vector store fixture."""
return InMemoryVectorStore()
def test_indexing_same_content(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
assert index(loader, record_manager, vector_store) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert len(list(vector_store.store)) == 2
for _ in range(2):
# Run the indexing again
assert index(loader, record_manager, vector_store) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_same_content(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
assert await aindex(await loader.alazy_load(), arecord_manager, vector_store) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
assert len(list(vector_store.store)) == 2
for _ in range(2):
# Run the indexing again
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
def test_index_simple_delete_full(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
),
Document(
page_content="This is another document.", # <-- Same as original
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"mutated document 1", "This is another document."}
# Attempt to index again verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindex_simple_delete_full(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Indexing some content to confirm it gets added only once."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
),
Document(
page_content="This is another document.",
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store, cleanup="full"
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 1).timestamp()
):
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store, cleanup="full"
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
),
Document(
page_content="This is another document.", # <-- Same as original
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store, cleanup="full"
) == {
"num_added": 1,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"mutated document 1", "This is another document."}
# Attempt to index again verify that nothing changes
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store, cleanup="full"
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
def test_incremental_fails_with_bad_source_ids(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is yet another document.",
metadata={"source": None},
),
]
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
index(loader, record_manager, vector_store, cleanup="incremental")
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
)
@pytest.mark.requires("aiosqlite")
async def test_aincremental_fails_with_bad_source_ids(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
Document(
page_content="This is yet another document.",
metadata={"source": None},
),
]
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
)
with pytest.raises(ValueError):
# Should raise an error because no source id function was specified
await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
)
def test_no_delete(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing without a deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
# If we add the same content twice it should be skipped
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated content",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
# Should result in no updates or deletions!
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_ano_delete(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing without a deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
# If we add the same content twice it should be skipped
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
loader = ToyLoader(
documents=[
Document(
page_content="mutated content",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
# Should result in no updates or deletions!
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup=None,
source_id_key="source",
) == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 1,
"num_updated": 0,
}
def test_incremental_delete(
record_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"This is another document.", "This is a test document."}
# Attempt to index again verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
# Create 2 documents from the same source all with mutated content
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
metadata={"source": "1"},
),
Document(
page_content="mutated document 2",
metadata={"source": "1"},
),
Document(
page_content="This is another document.", # <-- Same as original
metadata={"source": "2"},
),
]
)
# Attempt to index again verify that nothing changes
with patch.object(
record_manager, "get_time", return_value=datetime(2021, 1, 3).timestamp()
):
assert index(
loader,
record_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {
"mutated document 1",
"mutated document 2",
"This is another document.",
}
@pytest.mark.requires("aiosqlite")
async def test_aincremental_delete(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Test indexing with incremental deletion strategy."""
loader = ToyLoader(
documents=[
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is another document.",
metadata={"source": "2"},
),
]
)
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {"This is another document.", "This is a test document."}
# Attempt to index again verify that nothing changes
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 2).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 2,
"num_updated": 0,
}
# Create 2 documents from the same source all with mutated content
loader = ToyLoader(
documents=[
Document(
page_content="mutated document 1",
metadata={"source": "1"},
),
Document(
page_content="mutated document 2",
metadata={"source": "1"},
),
Document(
page_content="This is another document.", # <-- Same as original
metadata={"source": "2"},
),
]
)
# Attempt to index again verify that nothing changes
with patch.object(
arecord_manager, "aget_time", return_value=datetime(2021, 1, 3).timestamp()
):
assert await aindex(
await loader.alazy_load(),
arecord_manager,
vector_store,
cleanup="incremental",
source_id_key="source",
) == {
"num_added": 2,
"num_deleted": 1,
"num_skipped": 1,
"num_updated": 0,
}
doc_texts = set(
# Ignoring type since doc should be in the store and not a None
vector_store.store.get(uid).page_content # type: ignore
for uid in vector_store.store
)
assert doc_texts == {
"mutated document 1",
"mutated document 2",
"This is another document.",
}
def test_indexing_with_no_docs(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert index(loader, record_manager, vector_store, cleanup="full") == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_aindexing_with_no_docs(
arecord_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
loader = ToyLoader(documents=[])
assert await aindex(
await loader.alazy_load(), arecord_manager, vector_store, cleanup="full"
) == {
"num_added": 0,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_adeduplication(
arecord_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
Document(
page_content="This is a test document.",
metadata={"source": "1"},
),
]
# Should result in only a single document being added
assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == {
"num_added": 1,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
def test_cleanup_with_different_batchsize(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert index(
docs, record_manager, vector_store, cleanup="full", cleanup_batch_size=17
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
@pytest.mark.requires("aiosqlite")
async def test_async_cleanup_with_different_batchsize(
arecord_manager: SQLRecordManager, vector_store: InMemoryVectorStore
) -> None:
"""Check that we can clean up with different batch size."""
docs = [
Document(
page_content="This is a test document.",
metadata={"source": str(d)},
)
for d in range(1000)
]
assert await aindex(docs, arecord_manager, vector_store, cleanup="full") == {
"num_added": 1000,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
docs = [
Document(
page_content="Different doc",
metadata={"source": str(d)},
)
for d in range(1001)
]
assert await aindex(
docs, arecord_manager, vector_store, cleanup="full", cleanup_batch_size=17
) == {
"num_added": 1001,
"num_deleted": 1000,
"num_skipped": 0,
"num_updated": 0,
}
def test_deduplication_v2(
record_manager: SQLRecordManager, vector_store: VectorStore
) -> None:
"""Check edge case when loader returns no new docs."""
docs = [
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="1",
metadata={"source": "1"},
),
Document(
page_content="2",
metadata={"source": "2"},
),
Document(
page_content="3",
metadata={"source": "3"},
),
]
assert index(docs, record_manager, vector_store, cleanup="full") == {
"num_added": 3,
"num_deleted": 0,
"num_skipped": 0,
"num_updated": 0,
}
# using in memory implementation here
assert isinstance(vector_store, InMemoryVectorStore)
contents = sorted(
[document.page_content for document in vector_store.store.values()]
)
assert contents == ["1", "2", "3"]
async def _to_async_iter(it: Iterable[Any]) -> AsyncIterator[Any]:
"""Convert an iterable to an async iterator."""
for i in it:
yield i
async def test_abatch() -> None:
"""Test the abatch function."""
batches = _abatch(5, _to_async_iter(range(12)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11],
]
batches = _abatch(1, _to_async_iter(range(3)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0], [1], [2]]
batches = _abatch(2, _to_async_iter(range(5)))
assert isinstance(batches, AsyncIterator)
assert [batch async for batch in batches] == [[0, 1], [2, 3], [4]]
def test_compatible_vectorstore_documentation() -> None:
"""Test which vectorstores are compatible with the indexing API.
This serves as a reminder to update the documentation in [1]
that specifies which vectorstores are compatible with the
indexing API.
Ideally if a developer adds a new vectorstore or modifies
an existing one in such a way that affects its compatibility
with the Indexing API, he/she will see this failed test
case and 1) update docs in [1] and 2) update the `documented`
dict in this test case.
[1] langchain/docs/docs_skeleton/docs/modules/data_connection/indexing.ipynb
"""
# Check if a vectorstore is compatible with the indexing API
def check_compatibility(vector_store: VectorStore) -> bool:
"""Check if a vectorstore is compatible with the indexing API."""
methods = ["delete", "add_documents"]
for method in methods:
if not hasattr(vector_store, method):
return False
# Checking if the vectorstore has overridden the default delete method
# implementation which just raises a NotImplementedError
if getattr(vector_store, "delete") == VectorStore.delete:
return False
return True
# Check all vector store classes for compatibility
compatible = set()
for class_name in langchain.vectorstores.__all__:
# Get the definition of the class
cls = getattr(langchain.vectorstores, class_name)
# If the class corresponds to a vectorstore, check its compatibility
if issubclass(cls, VectorStore):
is_compatible = check_compatibility(cls)
if is_compatible:
compatible.add(class_name)
# These are mentioned in the indexing.ipynb documentation
documented = {
"AnalyticDB",
"AstraDB",
"AzureCosmosDBVectorSearch",
"AwaDB",
"Bagel",
"Cassandra",
"Chroma",
"DashVector",
"DatabricksVectorSearch",
"DeepLake",
"Dingo",
"ElasticVectorSearch",
"ElasticsearchStore",
"FAISS",
"MomentoVectorIndex",
"MyScale",
"PGVector",
"Pinecone",
"Qdrant",
"Redis",
"ScaNN",
"SemaDB",
"SupabaseVectorStore",
"TileDB",
"TimescaleVector",
"Vald",
"Vearch",
"VespaStore",
"Weaviate",
"ZepVectorStore",
}
assert compatible == documented
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | libs/langchain/tests/unit_tests/retrievers/test_multi_vector.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,342 | Error: | ### System Info
I tried this example code:
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
    vectorstore=vectorstore,
    docstore=store,
    child_splitter=child_splitter,
    parent_splitter=parent_splitter,
)
```
but I encountered an error:
```
1 # Initialize the retriever
----> 2 parent_document_retriever = ParentDocumentRetriever(
3 vectorstore=vectorstore,
4 docstore=store,
5 child_splitter=child_splitter,
TypeError: MultiVectorRetriever.__init__() got an unexpected keyword argument 'child_splitter'
```
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
```
from langchain.retrievers import ParentDocumentRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.storage import InMemoryStore
# This text splitter is used to create the parent documents
parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000)
# This text splitter is used to create the child documents
# It should create documents smaller than the parent
child_splitter = RecursiveCharacterTextSplitter(chunk_size=400)
# The vectorstore to use to index the child chunks
vectorstore = Chroma(embedding_function=OpenAIEmbeddings())
# The storage layer for the parent documents
store = InMemoryStore()
vectorstore = Chroma(collection_name="test", embedding_function=OpenAIEmbeddings())
# Initialize the retriever
parent_document_retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
parent_splitter=parent_splitter,
)
```
### Expected behavior
I can run. | https://github.com/langchain-ai/langchain/issues/14342 | https://github.com/langchain-ai/langchain/pull/14350 | 7bdfc43766e72e4b67512bd85119b1c797035b86 | 867ca6d0bec2dac5330257bc886880743f3ece4d | "2023-12-06T11:09:11Z" | python | "2023-12-06T19:12:50Z" | libs/langchain/tests/unit_tests/retrievers/test_parent_document.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,467 | DeprecationWarning: `input_variables' is deprecated and ignored | ### System Info
langchain==0.0.346
### Who can help?
@hwch
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
https://github.com/langchain-ai/langchain/pull/14266 added a deprecation for the `input_variables` argument of `PromptTemplate.from_file`. It was released in 0.0.346.
However, https://github.com/langchain-ai/langchain/blob/v0.0.346/libs/langchain/langchain/chains/llm_summarization_checker/base.py#L20-L31 still uses `input_variables` at the module-level. So now this `DeprecationWarning` is emitted simply for importing from LangChain.
Can we fix this, so LangChain isn't emitting `DeprecationWarning`s?
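For illustration, one possible shape of the fix — a sketch, not necessarily the actual patch — is to drop the deprecated argument from the module-level calls, since `input_variables` is ignored and the variables are inferred from the template file:

```python
# Sketch of how the module-level prompts in llm_summarization_checker/base.py
# could avoid the deprecated argument; assumes PromptTemplate.from_file infers
# the input variables from the template text when the argument is omitted.
from pathlib import Path

from langchain_core.prompts.prompt import PromptTemplate

PROMPTS_DIR = Path(__file__).parent / "prompts"

CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "create_facts.txt")
CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "check_facts.txt")
REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "revise_summary.txt")
ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(PROMPTS_DIR / "are_all_true_prompt.txt")
```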
### Expected behavior
I expect LangChain to not automatically emit `DeprecationWarning`s when importing from it | https://github.com/langchain-ai/langchain/issues/14467 | https://github.com/langchain-ai/langchain/pull/14468 | df95abb7e76f4432f5d270e4065d2cb2839a9f64 | b9ef92f2f4bb1e4322dff040c4474c0922fbf546 | "2023-12-08T21:10:46Z" | python | "2023-12-13T01:43:27Z" | libs/langchain/langchain/chains/llm_summarization_checker/base.py | """Chain for summarization with self-verification."""
from __future__ import annotations
import warnings
from pathlib import Path
from typing import Any, Dict, List, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
PROMPTS_DIR = Path(__file__).parent / "prompts"
CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "create_facts.txt", ["summary"]
)
CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "check_facts.txt", ["assertions"]
)
REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "revise_summary.txt", ["checked_assertions", "summary"]
)
ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(
PROMPTS_DIR / "are_all_true_prompt.txt", ["checked_assertions"]
)
def _load_sequential_chain(
llm: BaseLanguageModel,
create_assertions_prompt: PromptTemplate,
check_assertions_prompt: PromptTemplate,
revised_summary_prompt: PromptTemplate,
are_all_true_prompt: PromptTemplate,
verbose: bool = False,
) -> SequentialChain:
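    # Pipeline: draft assertions from the summary, fact-check them, rewrite the
    # summary against the checked assertions, then ask whether every assertion holds.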
chain = SequentialChain(
chains=[
LLMChain(
llm=llm,
prompt=create_assertions_prompt,
output_key="assertions",
verbose=verbose,
),
LLMChain(
llm=llm,
prompt=check_assertions_prompt,
output_key="checked_assertions",
verbose=verbose,
),
LLMChain(
llm=llm,
prompt=revised_summary_prompt,
output_key="revised_summary",
verbose=verbose,
),
LLMChain(
llm=llm,
output_key="all_true",
prompt=are_all_true_prompt,
verbose=verbose,
),
],
input_variables=["summary"],
output_variables=["all_true", "revised_summary"],
verbose=verbose,
)
return chain
class LLMSummarizationCheckerChain(Chain):
"""Chain for question-answering with self-verification.
Example:
.. code-block:: python
from langchain.llms import OpenAI
from langchain.chains import LLMSummarizationCheckerChain
llm = OpenAI(temperature=0.0)
checker_chain = LLMSummarizationCheckerChain.from_llm(llm)
"""
sequential_chain: SequentialChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT
"""[Deprecated]"""
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
"""[Deprecated]"""
revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT
"""[Deprecated]"""
are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT
"""[Deprecated]"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
max_checks: int = 2
"""Maximum number of times to check the assertions. Default to double-checking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an LLMSummarizationCheckerChain with an llm is "
"deprecated. Please instantiate with"
" sequential_chain argument or using the from_llm class method."
)
if "sequential_chain" not in values and values["llm"] is not None:
values["sequential_chain"] = _load_sequential_chain(
values["llm"],
values.get("create_assertions_prompt", CREATE_ASSERTIONS_PROMPT),
values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
values.get("revised_summary_prompt", REVISED_SUMMARY_PROMPT),
values.get("are_all_true_prompt", ARE_ALL_TRUE_PROMPT),
verbose=values.get("verbose", False),
)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
all_true = False
count = 0
output = None
original_input = inputs[self.input_key]
chain_input = original_input
while not all_true and count < self.max_checks:
output = self.sequential_chain(
{"summary": chain_input}, callbacks=_run_manager.get_child()
)
count += 1
if output["all_true"].strip() == "True":
break
if self.verbose:
print(output["revised_summary"])
chain_input = output["revised_summary"]
if not output:
raise ValueError("No output from chain")
return {self.output_key: output["revised_summary"].strip()}
@property
def _chain_type(self) -> str:
return "llm_summarization_checker_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT,
check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT,
revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT,
are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT,
verbose: bool = False,
**kwargs: Any,
) -> LLMSummarizationCheckerChain:
chain = _load_sequential_chain(
llm,
create_assertions_prompt,
check_assertions_prompt,
revised_summary_prompt,
are_all_true_prompt,
verbose=verbose,
)
return cls(sequential_chain=chain, verbose=verbose, **kwargs)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,467 | DeprecationWarning: `input_variables' is deprecated and ignored | ### System Info
langchain==0.0.346
### Who can help?
@hwch
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [X] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [X] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
https://github.com/langchain-ai/langchain/pull/14266 added a deprecation for `input_variables` argument of `PromptTemplate.from_file`. It was released in 0.0.346.
However, https://github.com/langchain-ai/langchain/blob/v0.0.346/libs/langchain/langchain/chains/llm_summarization_checker/base.py#L20-L31 still uses `input_variables` at the module-level. So now this `DeprecationWarning` is emitted simply for importing from LangChain.
Can we fix this, so LangChain isn't emitting `DeprecationWarning`s?
### Expected behavior
I expect LangChain to not automatically emit `DeprecationWarning`s when importing from it | https://github.com/langchain-ai/langchain/issues/14467 | https://github.com/langchain-ai/langchain/pull/14468 | df95abb7e76f4432f5d270e4065d2cb2839a9f64 | b9ef92f2f4bb1e4322dff040c4474c0922fbf546 | "2023-12-08T21:10:46Z" | python | "2023-12-13T01:43:27Z" | libs/langchain/tests/unit_tests/chains/test_llm_summarization_checker.py | # flake8: noqa E501
"""Test LLMSummarization functionality."""
import pytest
from langchain.chains.llm_summarization_checker.base import (
ARE_ALL_TRUE_PROMPT,
CHECK_ASSERTIONS_PROMPT,
CREATE_ASSERTIONS_PROMPT,
REVISED_SUMMARY_PROMPT,
LLMSummarizationCheckerChain,
)
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.fixture
def fake_llm_summarization_checker_chain() -> LLMSummarizationCheckerChain:
"""Fake LLMCheckerChain for testing."""
queries = {
CREATE_ASSERTIONS_PROMPT.format(
summary="a",
): "b",
CHECK_ASSERTIONS_PROMPT.format(
assertions="b",
): "- b - True",
REVISED_SUMMARY_PROMPT.format(
checked_assertions="- b - True", summary="a"
): "b",
ARE_ALL_TRUE_PROMPT.format(
checked_assertions="- b - True",
): "True",
}
fake_llm = FakeLLM(queries=queries)
return LLMSummarizationCheckerChain.from_llm(
fake_llm, input_key="q", output_key="a"
)
def test_simple_text(
fake_llm_summarization_checker_chain: LLMSummarizationCheckerChain,
) -> None:
"""Test simple question that should not need python."""
question = "a"
output = fake_llm_summarization_checker_chain.run(question)
assert output == "b"
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,232 | similarity_search_with_score does not accept "!" in the query | ### System Info
python 3.11.6
langchain==0.0.345
langchain-core==0.0.9
jupyter_client==8.6.0
jupyter_core==5.5.0
ipykernel==6.27.0
ipython==8.17.2
on mac M2
### Who can help?
@baskaryan @tomasonjo @hwchase17
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
1. The setup is the same as in https://github.com/langchain-ai/langchain/issues/14231, although I don't think it matters.
2. Run `existing_graph.similarity_search_with_score("It is the end of the world. Take shelter!")`
3. It returns the following error
---------------------------------------------------------------------------
ClientError Traceback (most recent call last)
/Users/josselinperrus/Projects/streetpress/neo4j.ipynb Cell 17 line 1
----> 1 existing_graph.similarity_search_with_score("It is the end of the world. Take shelter !")
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:550, in Neo4jVector.similarity_search_with_score(self, query, k)
540 """Return docs most similar to query.
541
542 Args:
(...)
547 List of Documents most similar to the query and score for each
548 """
549 embedding = self.embedding.embed_query(query)
--> 550 docs = self.similarity_search_with_score_by_vector(
551 embedding=embedding, k=k, query=query
552 )
553 return docs
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:595, in Neo4jVector.similarity_search_with_score_by_vector(self, embedding, k, **kwargs)
586 read_query = _get_search_index_query(self.search_type) + retrieval_query
587 parameters = {
588 "index": self.index_name,
589 "k": k,
(...)
592 "query": kwargs["query"],
593 }
--> 595 results = self.query(read_query, params=parameters)
597 docs = [
598 (
599 Document(
(...)
607 for result in results
608 ]
609 return docs
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in Neo4jVector.query(self, query, params)
240 try:
241 data = session.run(query, params)
--> 242 return [r.data() for r in data]
243 except CypherSyntaxError as e:
244 raise ValueError(f"Cypher Statement is not valid\n{e}")
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in <listcomp>(.0)
240 try:
241 data = session.run(query, params)
--> 242 return [r.data() for r in data]
243 except CypherSyntaxError as e:
244 raise ValueError(f"Cypher Statement is not valid\n{e}")
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:270, in Result.__iter__(self)
268 yield self._record_buffer.popleft()
269 elif self._streaming:
--> 270 self._connection.fetch_message()
271 elif self._discarding:
272 self._discard()
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:178, in ConnectionErrorHandler.__getattr__.<locals>.outer.<locals>.inner(*args, **kwargs)
176 def inner(*args, **kwargs):
177 try:
--> 178 func(*args, **kwargs)
179 except (Neo4jError, ServiceUnavailable, SessionExpired) as exc:
180 assert not asyncio.iscoroutinefunction(self.__on_error)
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:849, in Bolt.fetch_message(self)
845 # Receive exactly one message
846 tag, fields = self.inbox.pop(
847 hydration_hooks=self.responses[0].hydration_hooks
848 )
--> 849 res = self._process_message(tag, fields)
850 self.idle_since = perf_counter()
851 return res
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:374, in Bolt5x0._process_message(self, tag, fields)
372 self._server_state_manager.state = self.bolt_states.FAILED
373 try:
--> 374 response.on_failure(summary_metadata or {})
375 except (ServiceUnavailable, DatabaseUnavailable):
376 if self.pool:
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:245, in Response.on_failure(self, metadata)
243 handler = self.handlers.get("on_summary")
244 Util.callback(handler)
--> 245 raise Neo4jError.hydrate(**metadata)
ClientError: {code: Neo.ClientError.Procedure.ProcedureCallFailed} {message: Failed to invoke procedure `db.index.fulltext.queryNodes`: Caused by: org.apache.lucene.queryparser.classic.ParseException: Encountered "<EOF>" at line 1, column 42.
Was expecting one of:
<BAREOPER> ...
"(" ...
"*" ...
<QUOTED> ...
<TERM> ...
<PREFIXTERM> ...
<WILDTERM> ...
<REGEXPTERM> ...
"[" ...
"{" ...
<NUMBER> ...
<TERM> ...
"*" ...
}
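The parse failure comes from the Lucene full-text parser behind `db.index.fulltext.queryNodes`, which hybrid search calls with the raw query string: a trailing `!` is read as a NOT operator with no operand, hence `Encountered "<EOF>"`. A minimal workaround sketch (an assumption, not the library's own fix; `sanitize_lucene_query` is a hypothetical helper) strips Lucene syntax characters before searching:

```python
# Hypothetical helper: remove characters that the Lucene query parser
# treats as syntax before the text reaches db.index.fulltext.queryNodes.
def sanitize_lucene_query(text: str) -> str:
    special_chars = [
        "+", "-", "&", "|", "!", "(", ")", "{", "}", "[", "]",
        "^", '"', "~", "*", "?", ":", "\\", "/",
    ]
    for char in special_chars:
        text = text.replace(char, " ")
    return " ".join(text.split())

existing_graph.similarity_search_with_score(
    sanitize_lucene_query("It is the end of the world. Take shelter !")
)
```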
### Expected behavior
No error | https://github.com/langchain-ai/langchain/issues/14232 | https://github.com/langchain-ai/langchain/pull/14646 | 7e6ca3c2b95594d7bdee4ba1337896b5dca1833e | ea2616ae23be97135696f7ace838ff38661426fc | "2023-12-04T16:07:02Z" | python | "2023-12-13T17:09:50Z" | libs/community/langchain_community/vectorstores/neo4j_vector.py | from __future__ import annotations
import enum
import logging
import os
import uuid
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_env
from langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import DistanceStrategy
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
DISTANCE_MAPPING = {
DistanceStrategy.EUCLIDEAN_DISTANCE: "euclidean",
DistanceStrategy.COSINE: "cosine",
}
class SearchType(str, enum.Enum):
"""Enumerator of the Distance strategies."""
VECTOR = "vector"
HYBRID = "hybrid"
DEFAULT_SEARCH_TYPE = SearchType.VECTOR
def _get_search_index_query(search_type: SearchType) -> str:
type_to_query_map = {
SearchType.VECTOR: (
"CALL db.index.vector.queryNodes($index, $k, $embedding) YIELD node, score "
),
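        # Hybrid search: combine vector search with keyword (full-text) search,
        # normalize the keyword scores by the per-query maximum, then merge and
        # deduplicate, keeping the best score per node.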
SearchType.HYBRID: (
"CALL { "
"CALL db.index.vector.queryNodes($index, $k, $embedding) "
"YIELD node, score "
"RETURN node, score UNION "
"CALL db.index.fulltext.queryNodes($keyword_index, $query, {limit: $k}) "
"YIELD node, score "
"WITH collect({node:node, score:score}) AS nodes, max(score) AS max "
"UNWIND nodes AS n "
"RETURN n.node AS node, (n.score / max) AS score " # We use 0 as min
"} "
"WITH node, max(score) AS score ORDER BY score DESC LIMIT $k " # dedup
),
}
return type_to_query_map[search_type]
def check_if_not_null(props: List[str], values: List[Any]) -> None:
"""Check if the values are not None or empty string"""
for prop, value in zip(props, values):
if not value:
raise ValueError(f"Parameter `{prop}` must not be None or empty string")
def sort_by_index_name(
lst: List[Dict[str, Any]], index_name: str
) -> List[Dict[str, Any]]:
"""Sort first element to match the index_name if exists"""
return sorted(lst, key=lambda x: x.get("index_name") != index_name)
class Neo4jVector(VectorStore):
"""`Neo4j` vector index.
To use, you should have the ``neo4j`` python package installed.
Args:
url: Neo4j connection url
username: Neo4j username.
password: Neo4j password
database: Optionally provide Neo4j database
Defaults to "neo4j"
embedding: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete existing data if it exists.
(default: False). Useful for testing.
Example:
.. code-block:: python
from langchain_community.vectorstores.neo4j_vector import Neo4jVector
from langchain_community.embeddings.openai import OpenAIEmbeddings
url="bolt://localhost:7687"
username="neo4j"
password="pleaseletmein"
embeddings = OpenAIEmbeddings()
vectorestore = Neo4jVector.from_documents(
embedding=embeddings,
documents=docs,
url=url
username=username,
password=password,
)
"""
def __init__(
self,
embedding: Embeddings,
*,
search_type: SearchType = SearchType.VECTOR,
username: Optional[str] = None,
password: Optional[str] = None,
url: Optional[str] = None,
keyword_index_name: Optional[str] = "keyword",
database: str = "neo4j",
index_name: str = "vector",
node_label: str = "Chunk",
embedding_node_property: str = "embedding",
text_node_property: str = "text",
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
logger: Optional[logging.Logger] = None,
pre_delete_collection: bool = False,
retrieval_query: str = "",
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
try:
import neo4j
except ImportError:
raise ImportError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
# Allow only cosine and euclidean distance strategies
if distance_strategy not in [
DistanceStrategy.EUCLIDEAN_DISTANCE,
DistanceStrategy.COSINE,
]:
raise ValueError(
"distance_strategy must be either 'EUCLIDEAN_DISTANCE' or 'COSINE'"
)
# Handle if the credentials are environment variables
# Support URL for backwards compatibility
url = os.environ.get("NEO4J_URL", url)
url = get_from_env("url", "NEO4J_URI", url)
username = get_from_env("username", "NEO4J_USERNAME", username)
password = get_from_env("password", "NEO4J_PASSWORD", password)
database = get_from_env("database", "NEO4J_DATABASE", database)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self.schema = ""
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct"
)
# Verify if the version support vector index
self.verify_version()
# Verify that required values are not null
check_if_not_null(
[
"index_name",
"node_label",
"embedding_node_property",
"text_node_property",
],
[index_name, node_label, embedding_node_property, text_node_property],
)
self.embedding = embedding
self._distance_strategy = distance_strategy
self.index_name = index_name
self.keyword_index_name = keyword_index_name
self.node_label = node_label
self.embedding_node_property = embedding_node_property
self.text_node_property = text_node_property
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.retrieval_query = retrieval_query
self.search_type = search_type
# Calculate embedding dimension
self.embedding_dimension = len(embedding.embed_query("foo"))
# Delete existing data if flagged
if pre_delete_collection:
from neo4j.exceptions import DatabaseError
self.query(
f"MATCH (n:`{self.node_label}`) "
"CALL { WITH n DETACH DELETE n } "
"IN TRANSACTIONS OF 10000 ROWS;"
)
# Delete index
try:
self.query(f"DROP INDEX {self.index_name}")
except DatabaseError: # Index didn't exist yet
pass
def query(
self, query: str, *, params: Optional[dict] = None
) -> List[Dict[str, Any]]:
"""
This method sends a Cypher query to the connected Neo4j database
and returns the results as a list of dictionaries.
Args:
query (str): The Cypher query to execute.
params (dict, optional): Dictionary of query parameters. Defaults to {}.
Returns:
List[Dict[str, Any]]: List of dictionaries containing the query results.
"""
from neo4j.exceptions import CypherSyntaxError
params = params or {}
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f"Cypher Statement is not valid\n{e}")
def verify_version(self) -> None:
"""
Check if the connected Neo4j database version supports vector indexing.
Queries the Neo4j database to retrieve its version and compares it
against a target version (5.11.0) that is known to support vector
indexing. Raises a ValueError if the connected Neo4j version is
not supported.
"""
version = self.query("CALL dbms.components()")[0]["versions"][0]
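        # Aura reports versions with a suffix (split on "-" keeps the numeric part);
        # pad with a trailing 0 so it is comparable to the three-part target below.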
if "aura" in version:
version_tuple = tuple(map(int, version.split("-")[0].split("."))) + (0,)
else:
version_tuple = tuple(map(int, version.split(".")))
target_version = (5, 11, 0)
if version_tuple < target_version:
raise ValueError(
"Version index is only supported in Neo4j version 5.11 or greater"
)
def retrieve_existing_index(self) -> Optional[int]:
"""
Check if the vector index exists in the Neo4j database
and returns its embedding dimension.
This method queries the Neo4j database for existing indexes
and attempts to retrieve the dimension of the vector index
with the specified name. If the index exists, its dimension is returned.
If the index doesn't exist, `None` is returned.
Returns:
int or None: The embedding dimension of the existing index if found.
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options "
"WHERE type = 'VECTOR' AND (name = $index_name "
"OR (labelsOrTypes[0] = $node_label AND "
"properties[0] = $embedding_node_property)) "
"RETURN name, labelsOrTypes, properties, options ",
params={
"index_name": self.index_name,
"node_label": self.node_label,
"embedding_node_property": self.embedding_node_property,
},
)
# sort by index_name
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.index_name = index_information[0]["name"]
self.node_label = index_information[0]["labelsOrTypes"][0]
self.embedding_node_property = index_information[0]["properties"][0]
embedding_dimension = index_information[0]["options"]["indexConfig"][
"vector.dimensions"
]
return embedding_dimension
except IndexError:
return None
def retrieve_existing_fts_index(
self, text_node_properties: List[str] = []
) -> Optional[str]:
"""
Check if the fulltext index exists in the Neo4j database
This method queries the Neo4j database for existing fts indexes
with the specified name.
Returns:
(Tuple): keyword index information
"""
index_information = self.query(
"SHOW INDEXES YIELD name, type, labelsOrTypes, properties, options "
"WHERE type = 'FULLTEXT' AND (name = $keyword_index_name "
"OR (labelsOrTypes = [$node_label] AND "
"properties = $text_node_property)) "
"RETURN name, labelsOrTypes, properties, options ",
params={
"keyword_index_name": self.keyword_index_name,
"node_label": self.node_label,
"text_node_property": text_node_properties or [self.text_node_property],
},
)
# sort by index_name
index_information = sort_by_index_name(index_information, self.index_name)
try:
self.keyword_index_name = index_information[0]["name"]
self.text_node_property = index_information[0]["properties"][0]
node_label = index_information[0]["labelsOrTypes"][0]
return node_label
except IndexError:
return None
def create_new_index(self) -> None:
"""
This method constructs a Cypher query and executes it
to create a new vector index in Neo4j.
"""
index_query = (
"CALL db.index.vector.createNodeIndex("
"$index_name,"
"$node_label,"
"$embedding_node_property,"
"toInteger($embedding_dimension),"
"$similarity_metric )"
)
parameters = {
"index_name": self.index_name,
"node_label": self.node_label,
"embedding_node_property": self.embedding_node_property,
"embedding_dimension": self.embedding_dimension,
"similarity_metric": DISTANCE_MAPPING[self._distance_strategy],
}
self.query(index_query, params=parameters)
def create_new_keyword_index(self, text_node_properties: List[str] = []) -> None:
"""
This method constructs a Cypher query and executes it
to create a new full text index in Neo4j.
"""
node_props = text_node_properties or [self.text_node_property]
fts_index_query = (
f"CREATE FULLTEXT INDEX {self.keyword_index_name} "
f"FOR (n:`{self.node_label}`) ON EACH "
f"[{', '.join(['n.`' + el + '`' for el in node_props])}]"
)
self.query(fts_index_query)
@property
def embeddings(self) -> Embeddings:
return self.embedding
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
create_id_index: bool = True,
search_type: SearchType = SearchType.VECTOR,
**kwargs: Any,
) -> Neo4jVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
store = cls(
embedding=embedding,
search_type=search_type,
**kwargs,
)
# Check if the vector index already exists
embedding_dimension = store.retrieve_existing_index()
# If the vector index doesn't exist yet
if not embedding_dimension:
store.create_new_index()
# If the index already exists, check if embedding dimensions match
elif not store.embedding_dimension == embedding_dimension:
raise ValueError(
f"Index with name {store.index_name} already exists."
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index()
# If the FTS index doesn't exist yet
if not fts_node_label:
store.create_new_keyword_index()
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
# Create unique constraint for faster import
if create_id_index:
store.query(
"CREATE CONSTRAINT IF NOT EXISTS "
f"FOR (n:`{store.node_label}`) REQUIRE n.id IS UNIQUE;"
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
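        # Batched Cypher import: MERGE one node per row by id, write the embedding
        # via db.create.setVectorProperty, then copy the text and metadata onto it.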
import_query = (
"UNWIND $data AS row "
"CALL { WITH row "
f"MERGE (c:`{self.node_label}` {{id: row.id}}) "
"WITH c, row "
f"CALL db.create.setVectorProperty(c, "
f"'{self.embedding_node_property}', row.embedding) "
"YIELD node "
f"SET c.`{self.text_node_property}` = row.text "
"SET c += row.metadata } IN TRANSACTIONS OF 1000 ROWS"
)
parameters = {
"data": [
{"text": text, "metadata": metadata, "embedding": embedding, "id": id}
for text, metadata, embedding, id in zip(
texts, metadatas, embeddings, ids
)
]
}
self.query(import_query, params=parameters)
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Neo4jVector.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
query=query,
)
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, query=query
)
return docs
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
Perform a similarity search in the Neo4j database using a
given vector and return the top k similar documents with their scores.
This method uses a Cypher query to find the top k documents that
are most similar to a given embedding. The similarity is measured
using a vector index in the Neo4j database. The results are returned
as a list of tuples, each containing a Document object and
its similarity score.
Args:
embedding (List[float]): The embedding vector to compare against.
k (int, optional): The number of top similar documents to retrieve.
Returns:
List[Tuple[Document, float]]: A list of tuples, each containing
a Document object and its similarity score.
"""
default_retrieval = (
f"RETURN node.`{self.text_node_property}` AS text, score, "
f"node {{.*, `{self.text_node_property}`: Null, "
f"`{self.embedding_node_property}`: Null, id: Null }} AS metadata"
)
retrieval_query = (
self.retrieval_query if self.retrieval_query else default_retrieval
)
read_query = _get_search_index_query(self.search_type) + retrieval_query
parameters = {
"index": self.index_name,
"k": k,
"embedding": embedding,
"keyword_index": self.keyword_index_name,
"query": kwargs["query"],
}
results = self.query(read_query, params=parameters)
docs = [
(
Document(
page_content=result["text"],
metadata={
k: v for k, v in result["metadata"].items() if v is not None
},
),
result["score"],
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, **kwargs
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[Neo4jVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Return Neo4jVector initialized from texts and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
distance_strategy=distance_strategy,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> Neo4jVector:
"""Construct Neo4jVector wrapper from raw documents and pre-
generated embeddings.
Return Neo4jVector initialized from documents and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
Example:
.. code-block:: python
from langchain_community.vectorstores.neo4j_vector import Neo4jVector
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
vectorstore = Neo4jVector.from_embeddings(
text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[Neo4jVector],
embedding: Embeddings,
index_name: str,
search_type: SearchType = DEFAULT_SEARCH_TYPE,
keyword_index_name: Optional[str] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Get instance of an existing Neo4j vector index. This method will
return the instance of the store without inserting any new
embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters along with
the `index_name` definition.
"""
if search_type == SearchType.HYBRID and not keyword_index_name:
raise ValueError(
"keyword_index name has to be specified "
"when using hybrid search option"
)
store = cls(
embedding=embedding,
index_name=index_name,
keyword_index_name=keyword_index_name,
search_type=search_type,
**kwargs,
)
embedding_dimension = store.retrieve_existing_index()
if not embedding_dimension:
raise ValueError(
"The specified vector index name does not exist. "
"Make sure to check if you spelled it correctly"
)
# Check if embedding function and vector index dimensions match
if not store.embedding_dimension == embedding_dimension:
raise ValueError(
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index()
# If the FTS index doesn't exist yet
if not fts_node_label:
raise ValueError(
"The specified keyword index name does not exist. "
"Make sure to check if you spelled it correctly"
)
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
return store
@classmethod
def from_documents(
cls: Type[Neo4jVector],
documents: List[Document],
embedding: Embeddings,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Neo4jVector:
"""
Return Neo4jVector initialized from documents and embeddings.
Neo4j credentials are required in the form of `url`, `username`,
and `password` and optional `database` parameters.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
**kwargs,
)
@classmethod
def from_existing_graph(
cls: Type[Neo4jVector],
embedding: Embeddings,
node_label: str,
embedding_node_property: str,
text_node_properties: List[str],
*,
keyword_index_name: Optional[str] = "keyword",
index_name: str = "vector",
search_type: SearchType = DEFAULT_SEARCH_TYPE,
retrieval_query: str = "",
**kwargs: Any,
) -> Neo4jVector:
"""
Initialize and return a Neo4jVector instance from an existing graph.
This method initializes a Neo4jVector instance using the provided
parameters and the existing graph. It validates the existence of
the indices and creates new ones if they don't exist.
Returns:
Neo4jVector: An instance of Neo4jVector initialized with the provided parameters
and existing graph.
Example:
>>> neo4j_vector = Neo4jVector.from_existing_graph(
... embedding=my_embedding,
... node_label="Document",
... embedding_node_property="embedding",
... text_node_properties=["title", "content"]
... )
Note:
Neo4j credentials are required in the form of `url`, `username`, and `password`,
and optional `database` parameters passed as additional keyword arguments.
"""
# Validate the list is not empty
if not text_node_properties:
raise ValueError(
"Parameter `text_node_properties` must not be an empty list"
)
# Prefer retrieval query from params, otherwise construct it
if not retrieval_query:
retrieval_query = (
f"RETURN reduce(str='', k IN {text_node_properties} |"
" str + '\\n' + k + ': ' + coalesce(node[k], '')) AS text, "
"node {.*, `"
+ embedding_node_property
+ "`: Null, id: Null, "
+ ", ".join([f"`{prop}`: Null" for prop in text_node_properties])
+ "} AS metadata, score"
)
store = cls(
embedding=embedding,
index_name=index_name,
keyword_index_name=keyword_index_name,
search_type=search_type,
retrieval_query=retrieval_query,
node_label=node_label,
embedding_node_property=embedding_node_property,
**kwargs,
)
# Check if the vector index already exists
embedding_dimension = store.retrieve_existing_index()
# If the vector index doesn't exist yet
if not embedding_dimension:
store.create_new_index()
# If the index already exists, check if embedding dimensions match
elif not store.embedding_dimension == embedding_dimension:
raise ValueError(
f"Index with name {store.index_name} already exists."
"The provided embedding function and vector index "
"dimensions do not match.\n"
f"Embedding function dimension: {store.embedding_dimension}\n"
f"Vector index dimension: {embedding_dimension}"
)
# FTS index for Hybrid search
if search_type == SearchType.HYBRID:
fts_node_label = store.retrieve_existing_fts_index(text_node_properties)
# If the FTS index doesn't exist yet
if not fts_node_label:
store.create_new_keyword_index(text_node_properties)
else: # Validate that FTS and Vector index use the same information
if not fts_node_label == store.node_label:
raise ValueError(
"Vector and keyword index don't index the same node label"
)
# Populate embeddings
while True:
fetch_query = (
f"MATCH (n:`{node_label}`) "
f"WHERE n.{embedding_node_property} IS null "
"AND any(k in $props WHERE n[k] IS NOT null) "
f"RETURN elementId(n) AS id, reduce(str='',"
"k IN $props | str + '\\n' + k + ':' + coalesce(n[k], '')) AS text "
"LIMIT 1000"
)
data = store.query(fetch_query, params={"props": text_node_properties})
text_embeddings = embedding.embed_documents([el["text"] for el in data])
params = {
"data": [
{"id": el["id"], "embedding": embedding}
for el, embedding in zip(data, text_embeddings)
]
}
store.query(
"UNWIND $data AS row "
f"MATCH (n:`{node_label}`) "
"WHERE elementId(n) = row.id "
f"CALL db.create.setVectorProperty(n, "
f"'{embedding_node_property}', row.embedding) "
"YIELD node RETURN count(*)",
params=params,
)
# Fewer rows than the batch size means all remaining nodes have been processed; stop.
if len(data) < 1000:
break
return store
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return lambda x: x
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return lambda x: x
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,232 | similarity_search_with_score does not accept "!" in the query | ### System Info
python 3.11.6
langchain==0.0.345
langchain-core==0.0.9
jupyter_client==8.6.0
jupyter_core==5.5.0
ipykernel==6.27.0
ipython==8.17.2
on mac M2
### Who can help?
@baskaryan @tomasonjo @hwchase17
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
1. The setup is the same as in https://github.com/langchain-ai/langchain/issues/14231, although I don't think it matters.
2. Run `existing_graph.similarity_search_with_score("It is the end of the world. Take shelter!")`
3. It returns the following error
---------------------------------------------------------------------------
ClientError Traceback (most recent call last)
/Users/josselinperrus/Projects/streetpress/neo4j.ipynb Cell 17 line 1
----> 1 existing_graph.similarity_search_with_score("It is the end of the world. Take shelter !")
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:550, in Neo4jVector.similarity_search_with_score(self, query, k)
540 """Return docs most similar to query.
541
542 Args:
(...)
547 List of Documents most similar to the query and score for each
548 """
549 embedding = self.embedding.embed_query(query)
--> 550 docs = self.similarity_search_with_score_by_vector(
551 embedding=embedding, k=k, query=query
552 )
553 return docs
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:595, in Neo4jVector.similarity_search_with_score_by_vector(self, embedding, k, **kwargs)
586 read_query = _get_search_index_query(self.search_type) + retrieval_query
587 parameters = {
588 "index": self.index_name,
589 "k": k,
(...)
592 "query": kwargs["query"],
593 }
--> 595 results = self.query(read_query, params=parameters)
597 docs = [
598 (
599 Document(
(...)
607 for result in results
608 ]
609 return docs
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in Neo4jVector.query(self, query, params)
240 try:
241 data = session.run(query, params)
--> 242 return [r.data() for r in data]
243 except CypherSyntaxError as e:
244 raise ValueError(f"Cypher Statement is not valid\n{e}")
File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in <listcomp>(.0)
240 try:
241 data = session.run(query, params)
--> 242 return [r.data() for r in data]
[243](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:243) except CypherSyntaxError as e:
[244](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:244) raise ValueError(f"Cypher Statement is not valid\n{e}")
File [~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:270](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:270), in Result.__iter__(self)
[268](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:268) yield self._record_buffer.popleft()
[269](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:269) elif self._streaming:
--> [270](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:270) self._connection.fetch_message()
[271](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:271) elif self._discarding:
[272](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:272) self._discard()
File [~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:178](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:178), in ConnectionErrorHandler.__getattr__.<locals>.outer.<locals>.inner(*args, **kwargs)
[176](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:176) def inner(*args, **kwargs):
[177](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:177) try:
--> [178](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:178) func(*args, **kwargs)
[179](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:179) except (Neo4jError, ServiceUnavailable, SessionExpired) as exc:
[180](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:180) assert not asyncio.iscoroutinefunction(self.__on_error)
File [~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:849](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:849), in Bolt.fetch_message(self)
[845](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:845) # Receive exactly one message
[846](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:846) tag, fields = self.inbox.pop(
[847](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:847) hydration_hooks=self.responses[0].hydration_hooks
[848](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:848) )
--> [849](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:849) res = self._process_message(tag, fields)
[850](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:850) self.idle_since = perf_counter()
[851](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:851) return res
File [~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:374](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:374), in Bolt5x0._process_message(self, tag, fields)
[372](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:372) self._server_state_manager.state = self.bolt_states.FAILED
[373](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:373) try:
--> [374](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:374) response.on_failure(summary_metadata or {})
[375](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:375) except (ServiceUnavailable, DatabaseUnavailable):
[376](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:376) if self.pool:
File [~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:245](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:245), in Response.on_failure(self, metadata)
[243](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:243) handler = self.handlers.get("on_summary")
[244](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:244) Util.callback(handler)
--> [245](https://file+.vscode-resource.vscode-cdn.net/Users/josselinperrus/Projects/streetpress/~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:245) raise Neo4jError.hydrate(**metadata)
ClientError: {code: Neo.ClientError.Procedure.ProcedureCallFailed} {message: Failed to invoke procedure `db.index.fulltext.queryNodes`: Caused by: org.apache.lucene.queryparser.classic.ParseException: Encountered "<EOF>" at line 1, column 42.
Was expecting one of:
<BAREOPER> ...
"(" ...
"*" ...
<QUOTED> ...
<TERM> ...
<PREFIXTERM> ...
<WILDTERM> ...
<REGEXPTERM> ...
"[" ...
"{" ...
<NUMBER> ...
<TERM> ...
"*" ...
}
### Expected behavior
No error | https://github.com/langchain-ai/langchain/issues/14232 | https://github.com/langchain-ai/langchain/pull/14646 | 7e6ca3c2b95594d7bdee4ba1337896b5dca1833e | ea2616ae23be97135696f7ace838ff38661426fc | "2023-12-04T16:07:02Z" | python | "2023-12-13T17:09:50Z" | libs/community/tests/integration_tests/vectorstores/test_neo4jvector.py | """Test Neo4jVector functionality."""
import os
from typing import List
from langchain_core.documents import Document
from langchain_community.vectorstores.neo4j_vector import Neo4jVector, SearchType
from langchain_community.vectorstores.utils import DistanceStrategy
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
url = os.environ.get("NEO4J_URL", "bolt://localhost:7687")
username = os.environ.get("NEO4J_USERNAME", "neo4j")
password = os.environ.get("NEO4J_PASSWORD", "pleaseletmein")
OS_TOKEN_COUNT = 1536
texts = ["foo", "bar", "baz"]
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f neo4j.yml up
"""
def drop_vector_indexes(store: Neo4jVector) -> None:
"""Cleanup all vector indexes"""
all_indexes = store.query(
"""
SHOW INDEXES YIELD name, type
WHERE type IN ["VECTOR", "FULLTEXT"]
RETURN name
"""
)
for index in all_indexes:
store.query(f"DROP INDEX {index['name']}")
class FakeEmbeddingsWithOsDimension(FakeEmbeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, embedding_texts: List[str]) -> List[List[float]]:
"""Return simple embeddings."""
return [
[float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(i + 1)]
for i in range(len(embedding_texts))
]
def embed_query(self, text: str) -> List[float]:
"""Return simple embeddings."""
return [float(1.0)] * (OS_TOKEN_COUNT - 1) + [float(texts.index(text) + 1)]
def test_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_euclidean() -> None:
"""Test euclidean distance"""
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
distance_strategy=DistanceStrategy.EUCLIDEAN_DISTANCE,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_embeddings() -> None:
"""Test end to end construction with embeddings and search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_catch_wrong_index_name() -> None:
"""Test if index name is misspelled, but node label and property are correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_catch_wrong_node_label() -> None:
"""Test if node label is misspelled, but index name is correct."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="test",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_with_metadatas() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_with_metadatas_with_scores() -> None:
"""Test end to end construction and search."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 1.0)]
drop_vector_indexes(docsearch)
def test_neo4jvector_relevance_score() -> None:
"""Test to make sure the relevance score is scaled to 0-1."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
output = docsearch.similarity_search_with_relevance_scores("foo", k=3)
assert output == [
(Document(page_content="foo", metadata={"page": "0"}), 1.0),
(Document(page_content="bar", metadata={"page": "1"}), 0.9998376369476318),
(Document(page_content="baz", metadata={"page": "2"}), 0.9993523359298706),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_retriever_search_threshold() -> None:
"""Test using retriever for searching with threshold."""
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Neo4jVector.from_texts(
texts=texts,
embedding=FakeEmbeddingsWithOsDimension(),
metadatas=metadatas,
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
retriever = docsearch.as_retriever(
search_type="similarity_score_threshold",
search_kwargs={"k": 3, "score_threshold": 0.9999},
)
output = retriever.get_relevant_documents("foo")
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
]
drop_vector_indexes(docsearch)
def test_custom_return_neo4jvector() -> None:
"""Test end to end construction and search."""
docsearch = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
retrieval_query="RETURN 'foo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_prefer_indexname() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["bar"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
output = existing_index.similarity_search("bar", k=1)
assert output == [Document(page_content="bar", metadata={})]
drop_vector_indexes(existing_index)
def test_neo4jvector_prefer_indexname_insert() -> None:
"""Test using when two indexes are found, prefer by index_name."""
Neo4jVector.from_texts(
texts=["baz"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
Neo4jVector.from_texts(
texts=["foo"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Test",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
existing_index = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
text_node_property="info",
)
existing_index.add_documents([Document(page_content="bar", metadata={})])
output = existing_index.similarity_search("bar", k=2)
assert output == [
Document(page_content="bar", metadata={}),
Document(page_content="foo", metadata={}),
]
drop_vector_indexes(existing_index)
def test_neo4jvector_hybrid() -> None:
"""Test end to end construction with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_deduplicate() -> None:
"""Test result deduplication with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
output = docsearch.similarity_search("foo", k=3)
assert output == [
Document(page_content="foo"),
Document(page_content="bar"),
Document(page_content="baz"),
]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN 'moo' AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="moo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_retrieval_query2() -> None:
"""Test custom retrieval_query with hybrid search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
retrieval_query="RETURN node.text AS text, score, {test: 'test'} AS metadata",
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"test": "test"})]
drop_vector_indexes(docsearch)
def test_neo4jvector_missing_keyword() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
docsearch = Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
)
try:
Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
search_type=SearchType.HYBRID,
)
except ValueError as e:
assert str(e) == (
"keyword_index name has to be specified when " "using hybrid search option"
)
drop_vector_indexes(docsearch)
def test_neo4jvector_hybrid_from_existing() -> None:
"""Test hybrid search with missing keyword_index_search."""
text_embeddings = FakeEmbeddingsWithOsDimension().embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
Neo4jVector.from_embeddings(
text_embeddings=text_embedding_pairs,
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
pre_delete_collection=True,
search_type=SearchType.HYBRID,
)
existing = Neo4jVector.from_existing_index(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
keyword_index_name="keyword",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph() -> None:
"""Test from_existing_graph with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_hybrid() -> None:
"""Test from_existing_graph hybrid with a single property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'foo'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: foo")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
def test_neo4jvector_from_existing_graph_multiple_properties_hybrid() -> None:
"""Test from_existing_graph with a two property."""
graph = Neo4jVector.from_texts(
texts=["test"],
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="foo",
node_label="Foo",
embedding_node_property="vector",
text_node_property="info",
pre_delete_collection=True,
)
graph.query("MATCH (n) DETACH DELETE n")
graph.query("CREATE (:Test {name:'Foo', name2: 'Fooz'})," "(:Test {name:'Bar'})")
existing = Neo4jVector.from_existing_graph(
embedding=FakeEmbeddingsWithOsDimension(),
url=url,
username=username,
password=password,
index_name="vector",
node_label="Test",
text_node_properties=["name", "name2"],
embedding_node_property="embedding",
search_type=SearchType.HYBRID,
)
output = existing.similarity_search("foo", k=1)
assert output == [Document(page_content="\nname: Foo\nname2: Fooz")]
drop_vector_indexes(existing)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,232 | similarity_search_with_score does not accept "!" in the query | ### System Info
python 3.11.6
langchain==0.0.345
langchain-core==0.0.9
jupyter_client==8.6.0
jupyter_core==5.5.0
ipykernel==6.27.0
ipython==8.17.2
on mac M2
### Who can help?
@baskaryan @tomasonjo @hwchase17
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
1. The setup is the same as in https://github.com/langchain-ai/langchain/issues/14231, although I don't think it matters.
2. Run `existing_graph.similarity_search_with_score("It is the end of the world. Take shelter!")`
3. It returns the following error
---------------------------------------------------------------------------
ClientError Traceback (most recent call last)
/Users/josselinperrus/Projects/streetpress/neo4j.ipynb Cell 17 line 1
----> 1 existing_graph.similarity_search_with_score("It is the end of the world. Take shelter !")

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:550, in Neo4jVector.similarity_search_with_score(self, query, k)
    540 """Return docs most similar to query.
    541
    542 Args:
(...)
    547     List of Documents most similar to the query and score for each
    548 """
    549 embedding = self.embedding.embed_query(query)
--> 550 docs = self.similarity_search_with_score_by_vector(
    551     embedding=embedding, k=k, query=query
    552 )
    553 return docs

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:595, in Neo4jVector.similarity_search_with_score_by_vector(self, embedding, k, **kwargs)
    586 read_query = _get_search_index_query(self.search_type) + retrieval_query
    587 parameters = {
    588     "index": self.index_name,
    589     "k": k,
(...)
    592     "query": kwargs["query"],
    593 }
--> 595 results = self.query(read_query, params=parameters)
    597 docs = [
    598     (
    599         Document(
(...)
    607     for result in results
    608 ]
    609 return docs

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in Neo4jVector.query(self, query, params)
    240 try:
    241     data = session.run(query, params)
--> 242 return [r.data() for r in data]
    243 except CypherSyntaxError as e:
    244     raise ValueError(f"Cypher Statement is not valid\n{e}")

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/langchain/vectorstores/neo4j_vector.py:242, in <listcomp>(.0)
    240 try:
    241     data = session.run(query, params)
--> 242 return [r.data() for r in data]
    243 except CypherSyntaxError as e:
    244     raise ValueError(f"Cypher Statement is not valid\n{e}")

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/work/result.py:270, in Result.__iter__(self)
    268     yield self._record_buffer.popleft()
    269 elif self._streaming:
--> 270     self._connection.fetch_message()
    271 elif self._discarding:
    272     self._discard()

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:178, in ConnectionErrorHandler.__getattr__.<locals>.outer.<locals>.inner(*args, **kwargs)
    176 def inner(*args, **kwargs):
    177     try:
--> 178         func(*args, **kwargs)
    179     except (Neo4jError, ServiceUnavailable, SessionExpired) as exc:
    180         assert not asyncio.iscoroutinefunction(self.__on_error)

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt.py:849, in Bolt.fetch_message(self)
    845 # Receive exactly one message
    846 tag, fields = self.inbox.pop(
    847     hydration_hooks=self.responses[0].hydration_hooks
    848 )
--> 849 res = self._process_message(tag, fields)
    850 self.idle_since = perf_counter()
    851 return res

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_bolt5.py:374, in Bolt5x0._process_message(self, tag, fields)
    372 self._server_state_manager.state = self.bolt_states.FAILED
    373 try:
--> 374     response.on_failure(summary_metadata or {})
    375 except (ServiceUnavailable, DatabaseUnavailable):
    376     if self.pool:

File ~/Projects/streetpress/venv/lib/python3.11/site-packages/neo4j/_sync/io/_common.py:245, in Response.on_failure(self, metadata)
    243 handler = self.handlers.get("on_summary")
    244 Util.callback(handler)
--> 245 raise Neo4jError.hydrate(**metadata)
ClientError: {code: Neo.ClientError.Procedure.ProcedureCallFailed} {message: Failed to invoke procedure `db.index.fulltext.queryNodes`: Caused by: org.apache.lucene.queryparser.classic.ParseException: Encountered "<EOF>" at line 1, column 42.
Was expecting one of:
<BAREOPER> ...
"(" ...
"*" ...
<QUOTED> ...
<TERM> ...
<PREFIXTERM> ...
<WILDTERM> ...
<REGEXPTERM> ...
"[" ...
"{" ...
<NUMBER> ...
<TERM> ...
"*" ...
}
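
A hedged workaround sketch (my own, not an official LangChain helper, and not necessarily the fix that was eventually merged): strip the characters that Lucene's classic query parser treats as syntax before handing free text to the hybrid search. `sanitize_fulltext_query` below is a hypothetical name.

```python
import re


def sanitize_fulltext_query(query: str) -> str:
    """Remove Lucene classic-parser syntax characters from free text."""
    # A trailing "!" is exactly what triggers the <EOF> parse error above.
    cleaned = re.sub(r'([+\-!(){}\[\]^"~*?:\\/]|&&|\|\|)', " ", query)
    return " ".join(cleaned.split())


# Hypothetical usage with the store from the reproduction steps:
# existing_graph.similarity_search_with_score(
#     sanitize_fulltext_query("It is the end of the world. Take shelter !")
# )
```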
### Expected behavior
No error | https://github.com/langchain-ai/langchain/issues/14232 | https://github.com/langchain-ai/langchain/pull/14646 | 7e6ca3c2b95594d7bdee4ba1337896b5dca1833e | ea2616ae23be97135696f7ace838ff38661426fc | "2023-12-04T16:07:02Z" | python | "2023-12-13T17:09:50Z" | libs/community/tests/unit_tests/vectorstores/test_neo4j.py | |
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,402 | AzureOpenAIEmbeddings cannot authenticate with azure_ad_token_provider | ### System Info
Langchain: 0.0.346
OpenAI: 1.3.7
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [X] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
Simple script to authenticate to Azure with RBAC
```
from langchain.embeddings import AzureOpenAIEmbeddings
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
embeddings = AzureOpenAIEmbeddings(azure_endpoint='xxxxxxx', azure_ad_token_provider=token_provider)
```
### Expected behavior
Should authenticate, but it seems like the `azure_ad_token_provider` is not added to the values dict
langchain/embeddings/azure_openai.py line 80-86
```
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
```
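By contrast, a hedged sketch of the kind of assignment that appears to be missing from this block (hypothetical, simply mirroring the lines quoted above; whether this one line is sufficient on its own is not confirmed here):

```python
# Hypothetical: carry the provider through validation like the other fields.
values["azure_ad_token_provider"] = values.get("azure_ad_token_provider")
```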
Other parameters are added to values, but not `azure_ad_token_provider` | https://github.com/langchain-ai/langchain/issues/14402 | https://github.com/langchain-ai/langchain/pull/14432 | 8a4162d15ef670a202b25fe483e80fe2ae3f5668 | dce3c74905ce2632f614961d3462960d2ce2ad28 | "2023-12-07T15:35:19Z" | python | "2023-12-13T22:37:39Z" | libs/community/langchain_community/embeddings/azure_openai.py | """Azure OpenAI embeddings wrapper."""
from __future__ import annotations
import os
import warnings
from typing import Dict, Optional, Union
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.utils import get_from_dict_or_env
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
class AzureOpenAIEmbeddings(OpenAIEmbeddings):
"""`Azure OpenAI` Embeddings API."""
azure_endpoint: Union[str, None] = None
"""Your Azure endpoint, including the resource.
Automatically inferred from env var `AZURE_OPENAI_ENDPOINT` if not provided.
Example: `https://example-resource.azure.openai.com/`
"""
deployment: Optional[str] = Field(default=None, alias="azure_deployment")
"""A model deployment.
If given sets the base client URL to include `/deployments/{azure_deployment}`.
Note: this means you won't be able to use non-deployment endpoints.
"""
openai_api_key: Union[str, None] = Field(default=None, alias="api_key")
"""Automatically inferred from env var `AZURE_OPENAI_API_KEY` if not provided."""
azure_ad_token: Union[str, None] = None
"""Your Azure Active Directory token.
Automatically inferred from env var `AZURE_OPENAI_AD_TOKEN` if not provided.
For more:
https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id.
""" # noqa: E501
azure_ad_token_provider: Union[str, None] = None
"""A function that returns an Azure Active Directory token.
Will be invoked on every request.
"""
openai_api_version: Optional[str] = Field(default=None, alias="api_version")
"""Automatically inferred from env var `OPENAI_API_VERSION` if not provided."""
validate_base_url: bool = True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
# Check OPENAI_KEY for backwards compatibility.
# TODO: Remove OPENAI_API_KEY support to avoid possible conflict when using
# other forms of azure credentials.
values["openai_api_key"] = (
values["openai_api_key"]
or os.getenv("AZURE_OPENAI_API_KEY")
or os.getenv("OPENAI_API_KEY")
)
values["openai_api_base"] = values["openai_api_base"] or os.getenv(
"OPENAI_API_BASE"
)
values["openai_api_version"] = values["openai_api_version"] or os.getenv(
"OPENAI_API_VERSION", default="2023-05-15"
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
values["openai_organization"] = (
values["openai_organization"]
or os.getenv("OPENAI_ORG_ID")
or os.getenv("OPENAI_ORGANIZATION")
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
values["azure_endpoint"] = values["azure_endpoint"] or os.getenv(
"AZURE_OPENAI_ENDPOINT"
)
values["azure_ad_token"] = values["azure_ad_token"] or os.getenv(
"AZURE_OPENAI_AD_TOKEN"
)
# Azure OpenAI embedding models allow a maximum of 16 texts
# at a time in each batch
# See: https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
values["chunk_size"] = min(values["chunk_size"], 16)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility. Before openai v1, no distinction was made
# between azure_endpoint and base_url (openai_api_base).
openai_api_base = values["openai_api_base"]
if openai_api_base and values["validate_base_url"]:
if "/openai" not in openai_api_base:
values["openai_api_base"] += "/openai"
warnings.warn(
"As of openai>=1.0.0, Azure endpoints should be specified via "
f"the `azure_endpoint` param not `openai_api_base` "
f"(or alias `base_url`). Updating `openai_api_base` from "
f"{openai_api_base} to {values['openai_api_base']}."
)
if values["deployment"]:
warnings.warn(
"As of openai>=1.0.0, if `deployment` (or alias "
"`azure_deployment`) is specified then "
"`openai_api_base` (or alias `base_url`) should not be. "
"Instead use `deployment` (or alias `azure_deployment`) "
"and `azure_endpoint`."
)
if values["deployment"] not in values["openai_api_base"]:
warnings.warn(
"As of openai>=1.0.0, if `openai_api_base` "
"(or alias `base_url`) is specified it is expected to be "
"of the form "
"https://example-resource.azure.openai.com/openai/deployments/example-deployment. " # noqa: E501
f"Updating {openai_api_base} to "
f"{values['openai_api_base']}."
)
values["openai_api_base"] += (
"/deployments/" + values["deployment"]
)
values["deployment"] = None
client_params = {
"api_version": values["openai_api_version"],
"azure_endpoint": values["azure_endpoint"],
"azure_deployment": values["deployment"],
"api_key": values["openai_api_key"],
"azure_ad_token": values["azure_ad_token"],
"azure_ad_token_provider": values["azure_ad_token_provider"],
"organization": values["openai_organization"],
"base_url": values["openai_api_base"],
"timeout": values["request_timeout"],
"max_retries": values["max_retries"],
"default_headers": values["default_headers"],
"default_query": values["default_query"],
"http_client": values["http_client"],
}
values["client"] = openai.AzureOpenAI(**client_params).embeddings
values["async_client"] = openai.AsyncAzureOpenAI(**client_params).embeddings
else:
values["client"] = openai.Embedding
return values
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,699 | sqlalchemy.exc.InvalidRequestError: Table 'langchain_pg_collection' is already defined for this MetaData instance. | ### Issue you'd like to raise.
what is this issue, and how can i resolve it:
```python
os.environ["AZURE_OPENAI_API_KEY"] = AZURE_OPENAI_API_KEY
os.environ["AZURE_OPENAI_ENDPOINT"] = AZURE_OPENAI_ENDPOINT
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
embedding = OpenAIEmbeddings()
COLLECTION_NAME = "network_team_documents"
CONNECTION_STRING = PGVector.connection_string_from_db_params(
driver=os.environ.get(DB_DRIVER, DB_DRIVER),
host=os.environ.get(DB_HOST, DB_HOST),
port=int(os.environ.get(DB_PORT, DB_PORT)),
database=os.environ.get(DB_DB, DB_DB),
user=os.environ.get(DB_USER, DB_USER),
password=os.environ.get(DB_PASS, DB_PASS),
)
store = PGVector(
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
embedding_function=embedding,
extend_existing=True,
)
gpt4 = AzureChatOpenAI(
azure_deployment="GPT4",
openai_api_version="2023-05-15",
)
retriever = store.as_retriever(search_type="similarity", search_kwargs={"k": 10})
qa_chain = RetrievalQA.from_chain_type(llm=gpt4,
chain_type="stuff",
retriever=retriever,
return_source_documents=True)
return qa_chain
```
```python
Traceback (most recent call last):
File "/opt/network_tool/chatbot/views.py", line 21, in chat
chat_object = create_session()
File "/opt/network_tool/chatbot/chatbot_functions.py", line 95, in create_session
store = PGVector(
File "/opt/klevernet_venv/lib/python3.10/site-packages/langchain_community/vectorstores/pgvector.py", line 199, in __init__
self.__post_init__()
File "/opt/klevernet_venv/lib/python3.10/site-packages/langchain_community/vectorstores/pgvector.py", line 207, in __post_init__
EmbeddingStore, CollectionStore = _get_embedding_collection_store()
File "/opt/klevernet_venv/lib/python3.10/site-packages/langchain_community/vectorstores/pgvector.py", line 66, in _get_embedding_collection_store
class CollectionStore(BaseModel):
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_api.py", line 195, in __init__
_as_declarative(reg, cls, dict_)
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py", line 247, in _as_declarative
return _MapperConfig.setup_mapping(registry, cls, dict_, None, {})
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py", line 328, in setup_mapping
return _ClassScanMapperConfig(
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py", line 578, in __init__
self._setup_table(table)
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/orm/decl_base.py", line 1729, in _setup_table
table_cls(
File "", line 2, in __new__
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/util/deprecations.py", line 281, in warned
return fn(*args, **kwargs) # type: ignore[no-any-return]
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/sql/schema.py", line 436, in __new__
return cls._new(*args, **kw)
File "/opt/klevernet_venv/lib/python3.10/site-packages/sqlalchemy/sql/schema.py", line 468, in _new
raise exc.InvalidRequestError(
sqlalchemy.exc.InvalidRequestError: Table 'langchain_pg_collection' is already defined for this MetaData instance. Specify 'extend_existing=True' to redefine options and columns on an existing Table object.
```
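The traceback shows the failure happens inside `PGVector.__post_init__`, where `_get_embedding_collection_store()` declares the `langchain_pg_collection` table on the shared SQLAlchemy `Base` metadata, so constructing a second `PGVector` in the same process tries to declare the table again. A minimal workaround sketch (my own, not the library's fix) is to build the store once per process and reuse it. The `get_store` helper name is hypothetical; it reuses `COLLECTION_NAME`, `CONNECTION_STRING` and `embedding` from the snippet above and drops `extend_existing=True`, which is not among the constructor parameters in the pgvector.py shown below:
```python
from functools import lru_cache

@lru_cache(maxsize=1)
def get_store() -> PGVector:
    # Construct PGVector only once, so the declarative table classes are
    # defined a single time on the shared SQLAlchemy Base metadata.
    return PGVector(
        collection_name=COLLECTION_NAME,
        connection_string=CONNECTION_STRING,
        embedding_function=embedding,
    )
```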
### Suggestion:
_No response_ | https://github.com/langchain-ai/langchain/issues/14699 | https://github.com/langchain-ai/langchain/pull/14726 | 1cec0afc62697ee730aa1378b69ab4556ffecffc | 9fb26a2a718bc18a5ce48f04cf6b66bc4751b734 | "2023-12-14T06:51:40Z" | python | "2023-12-14T21:27:30Z" | libs/community/langchain_community/vectorstores/pgvector.py | from __future__ import annotations
import asyncio
import contextlib
import enum
import logging
import uuid
from functools import partial
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.orm import Session, relationship
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.utils import get_from_dict_or_env
from langchain_core.vectorstores import VectorStore
from langchain_community.vectorstores.utils import maximal_marginal_relevance
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies."""
EUCLIDEAN = "l2"
COSINE = "cosine"
MAX_INNER_PRODUCT = "inner"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
Base = declarative_base() # type: Any
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for the SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
def _get_embedding_collection_store() -> Any:
from pgvector.sqlalchemy import Vector
class CollectionStore(BaseModel):
"""Collection store."""
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(
cls, session: Session, name: str
) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first() # type: ignore
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
""" # noqa: E501
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
"""Embedding store."""
__tablename__ = "langchain_pg_embedding"
collection_id = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding: Vector = sqlalchemy.Column(Vector(None))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
return EmbeddingStore, CollectionStore
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class PGVector(VectorStore):
"""`Postgres`/`PGVector` vector store.
To use, you should have the ``pgvector`` python package installed.
Args:
connection_string: Postgres connection string.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
engine_args: SQLAlchemy's create engine arguments.
Example:
.. code-block:: python
from langchain_community.vectorstores import PGVector
from langchain_community.embeddings.openai import OpenAIEmbeddings
CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
            vectorstore = PGVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
)
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
*,
connection: Optional[sqlalchemy.engine.Connection] = None,
engine_args: Optional[dict[str, Any]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.engine_args = engine_args or {}
# Create a connection if not provided, otherwise use the provided connection
self._conn = connection if connection else self.connect()
self.__post_init__()
def __post_init__(
self,
) -> None:
"""Initialize the store."""
self.create_vector_extension()
EmbeddingStore, CollectionStore = _get_embedding_collection_store()
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection()
def __del__(self) -> None:
if self._conn:
self._conn.close()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string, **self.engine_args)
conn = engine.connect()
return conn
def create_vector_extension(self) -> None:
try:
with Session(self._conn) as session:
# The advisor lock fixes issue arising from concurrent
# creation of the vector extension.
# https://github.com/langchain-ai/langchain/issues/12933
# For more information see:
# https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS
statement = sqlalchemy.text(
"BEGIN;"
"SELECT pg_advisory_xact_lock(1573678846307946496);"
"CREATE EXTENSION IF NOT EXISTS vector;"
"COMMIT;"
)
session.execute(statement)
session.commit()
except Exception as e:
raise Exception(f"Failed to create vector extension: {e}") from e
def create_tables_if_not_exists(self) -> None:
with self._conn.begin():
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
with self._conn.begin():
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
self.CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
@contextlib.contextmanager
def _make_session(self) -> Generator[Session, None, None]:
"""Create a context manager for the session, bind to _conn string."""
yield Session(self._conn)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._conn) as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit()
def get_collection(self, session: Session) -> Any:
return self.CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
connection_string: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = self.EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
collection_id=collection.uuid,
)
session.add(embedding_store)
session.commit()
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
@property
def distance_strategy(self) -> Any:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results = self.__query_collection(embedding=embedding, k=k, filter=filter)
return self._results_to_docs_and_scores(results)
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def __query_collection(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Any]:
"""Query the collection."""
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
IN, NIN = "in", "nin"
for key, value in filter.items():
if isinstance(value, dict):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
if IN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.in_(value_case_insensitive[IN])
elif NIN in map(str.lower, value):
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.not_in(value_case_insensitive[NIN])
else:
filter_by_metadata = None
if filter_by_metadata is not None:
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = (
session.query(
self.EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid,
)
.limit(k)
.all()
)
return results
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return _results_to_docs(docs_and_scores)
@classmethod
def from_texts(
cls: Type[PGVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from texts and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""Construct PGVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain_community.vectorstores import PGVector
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
                vectorstore = PGVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
        Get an instance of an existing PGVector store. This method will
        return the instance of the store without inserting any new
        embeddings.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
            raise ValueError(
                "Postgres connection string is required. "
                "Either pass it as a parameter "
                "or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGVector],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
results = self.__query_collection(embedding=embedding, k=fetch_k, filter=filter)
embedding_list = [result.EmbeddingStore.embedding for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
embedding_list,
k=k,
lambda_mult=lambda_mult,
)
candidates = self._results_to_docs_and_scores(results)
return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return await asyncio.get_event_loop().run_in_executor(None, func)
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 14,904 | Issue: cannot import name 'LLMContentHandler' from 'langchain.llms.sagemaker_endpoint | Cannot import LLMContentHandler
langchain: 0.0.351
python: 3.9
To reproduce:
``` python
from langchain.llms.sagemaker_endpoint import LLMContentHandler
```
The issue could be resolved by updating
https://github.com/langchain-ai/langchain/blob/583696732cbaa3d1cf3a3a9375539a7e8785850c/libs/langchain/langchain/llms/sagemaker_endpoint.py#L1C5-L7
as follows:
``` python
from langchain_community.llms.sagemaker_endpoint import (
LLMContentHandler,
SagemakerEndpoint,
)
__all__ = [
"SagemakerEndpoint",
"LLMContentHandler"
]
```
| https://github.com/langchain-ai/langchain/issues/14904 | https://github.com/langchain-ai/langchain/pull/14906 | 4f4b078bf3b2ea3c150f0a03765eae24efda6c1a | 1069a93d187bcaa290d087d3754ef1189882add9 | "2023-12-19T13:53:31Z" | python | "2023-12-19T15:00:32Z" | libs/langchain/langchain/llms/sagemaker_endpoint.py | from langchain_community.llms.sagemaker_endpoint import (
SagemakerEndpoint,
)
__all__ = [
"SagemakerEndpoint",
]
|
closed | langchain-ai/langchain | https://github.com/langchain-ai/langchain | 5,861 | KeyError 'content' | ### System Info
Langchain version 165
Python 3.9
### Who can help?
_No response_
### Information
- [ ] The official example notebooks/scripts
- [ ] My own modified scripts
### Related Components
- [X] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [ ] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
I call the LLM with llm.generate(xxx) in my code.
We are connected to the Azure OpenAI Service, and strangely enough, in a production environment, the following error is occasionally returned:
`File \"/usr/local/lib/python3.9/site-packages/langchain/chat_models/openai.py\", line 75, in _convert_dict_to_message return AIMessage( content=_dict[\"content\"]) KeyError: 'content'`
Checking the LangChain source code, the error comes from this piece of code, which cannot find the 'content' element; when I take the same message locally and retry, the message body is normal:
``` python
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict["content"])
elif role == "system":
return SystemMessage(content=_dict["content"])
else:
return ChatMessage(content=_dict["content"], role=role)
```
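A minimal defensive variant (a sketch of one possible fix, not necessarily the one the maintainers chose) uses `.get()` with a fallback in the assistant branch, since that is the field the Azure response occasionally omits; the message classes are the same ones used in the snippet above:
```python
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
    role = _dict["role"]
    if role == "user":
        return HumanMessage(content=_dict["content"])
    elif role == "assistant":
        # Azure can omit "content" (or return None) on assistant messages,
        # so fall back to an empty string instead of raising KeyError.
        return AIMessage(content=_dict.get("content", "") or "")
    elif role == "system":
        return SystemMessage(content=_dict["content"])
    else:
        return ChatMessage(content=_dict["content"], role=role)
```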
Suggestions for fixing:
1. When there is an error, could the error log be more detailed?
2. Could a method be provided that returns only the raw response, so the caller can handle it themselves?
### Expected behavior
should have no error | https://github.com/langchain-ai/langchain/issues/5861 | https://github.com/langchain-ai/langchain/pull/14765 | b0588774f142e00d24c6852077a57b56e3888022 | 5642132c0c615ecd0984d5e9c45ef6076ccc69d2 | "2023-06-08T03:09:03Z" | python | "2023-12-20T06:17:23Z" | libs/community/langchain_community/adapters/openai.py | from __future__ import annotations
import importlib
from typing import (
Any,
AsyncIterator,
Dict,
Iterable,
List,
Mapping,
Sequence,
Union,
overload,
)
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
BaseMessageChunk,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
ToolMessage,
)
from langchain_core.pydantic_v1 import BaseModel
from typing_extensions import Literal
async def aenumerate(
iterable: AsyncIterator[Any], start: int = 0
) -> AsyncIterator[tuple[int, Any]]:
"""Async version of enumerate function."""
i = start
async for x in iterable:
yield i, x
i += 1
class IndexableBaseModel(BaseModel):
"""Allows a BaseModel to return its fields by string variable indexing"""
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
class Choice(IndexableBaseModel):
message: dict
class ChatCompletions(IndexableBaseModel):
choices: List[Choice]
class ChoiceChunk(IndexableBaseModel):
delta: dict
class ChatCompletionChunk(IndexableBaseModel):
choices: List[ChoiceChunk]
def convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
"""Convert a dictionary to a LangChain message.
Args:
_dict: The dictionary.
Returns:
The LangChain message.
"""
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
# Fix for azure
# Also OpenAI returns None for tool invocations
content = _dict.get("content", "") or ""
additional_kwargs: Dict = {}
if _dict.get("function_call"):
additional_kwargs["function_call"] = dict(_dict["function_call"])
if _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = _dict["tool_calls"]
return AIMessage(content=content, additional_kwargs=additional_kwargs)
elif role == "system":
return SystemMessage(content=_dict["content"])
elif role == "function":
return FunctionMessage(content=_dict["content"], name=_dict["name"])
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return ToolMessage(
content=_dict["content"],
tool_call_id=_dict["tool_call_id"],
additional_kwargs=additional_kwargs,
)
else:
return ChatMessage(content=_dict["content"], role=role)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a LangChain message to a dictionary.
Args:
message: The LangChain message.
Returns:
The dictionary.
"""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
# If tool calls only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"content": message.content,
"tool_call_id": message.tool_call_id,
}
else:
raise TypeError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def convert_openai_messages(messages: Sequence[Dict[str, Any]]) -> List[BaseMessage]:
"""Convert dictionaries representing OpenAI messages to LangChain format.
Args:
messages: List of dictionaries representing OpenAI messages
Returns:
List of LangChain BaseMessage objects.
"""
return [convert_dict_to_message(m) for m in messages]
def _convert_message_chunk(chunk: BaseMessageChunk, i: int) -> dict:
_dict: Dict[str, Any] = {}
if isinstance(chunk, AIMessageChunk):
if i == 0:
# Only shows up in the first chunk
_dict["role"] = "assistant"
if "function_call" in chunk.additional_kwargs:
_dict["function_call"] = chunk.additional_kwargs["function_call"]
# If the first chunk is a function call, the content is not empty string,
# not missing, but None.
if i == 0:
_dict["content"] = None
else:
_dict["content"] = chunk.content
else:
raise ValueError(f"Got unexpected streaming chunk type: {type(chunk)}")
# This only happens at the end of streams, and OpenAI returns as empty dict
if _dict == {"content": ""}:
_dict = {}
return _dict
def _convert_message_chunk_to_delta(chunk: BaseMessageChunk, i: int) -> Dict[str, Any]:
_dict = _convert_message_chunk(chunk, i)
return {"choices": [{"delta": _dict}]}
class ChatCompletion:
"""Chat completion."""
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict:
...
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> Iterable:
...
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, Iterable]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
for i, c in enumerate(model_config.stream(converted_messages))
)
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> dict:
...
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> AsyncIterator:
...
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[dict, AsyncIterator]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = await model_config.ainvoke(converted_messages)
return {"choices": [{"message": convert_message_to_dict(result)}]}
else:
return (
_convert_message_chunk_to_delta(c, i)
async for i, c in aenumerate(model_config.astream(converted_messages))
)
def _has_assistant_message(session: ChatSession) -> bool:
"""Check if chat session has an assistant message."""
return any([isinstance(m, AIMessage) for m in session["messages"]])
def convert_messages_for_finetuning(
sessions: Iterable[ChatSession],
) -> List[List[dict]]:
"""Convert messages to a list of lists of dictionaries for fine-tuning.
Args:
sessions: The chat sessions.
Returns:
The list of lists of dictionaries.
"""
return [
[convert_message_to_dict(s) for s in session["messages"]]
for session in sessions
if _has_assistant_message(session)
]
class Completions:
"""Completion."""
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> ChatCompletions:
...
@overload
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> Iterable:
...
@staticmethod
def create(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[ChatCompletions, Iterable]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = model_config.invoke(converted_messages)
return ChatCompletions(
choices=[Choice(message=convert_message_to_dict(result))]
)
else:
return (
ChatCompletionChunk(
choices=[ChoiceChunk(delta=_convert_message_chunk(c, i))]
)
for i, c in enumerate(model_config.stream(converted_messages))
)
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[False] = False,
**kwargs: Any,
) -> ChatCompletions:
...
@overload
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: Literal[True],
**kwargs: Any,
) -> AsyncIterator:
...
@staticmethod
async def acreate(
messages: Sequence[Dict[str, Any]],
*,
provider: str = "ChatOpenAI",
stream: bool = False,
**kwargs: Any,
) -> Union[ChatCompletions, AsyncIterator]:
models = importlib.import_module("langchain.chat_models")
model_cls = getattr(models, provider)
model_config = model_cls(**kwargs)
converted_messages = convert_openai_messages(messages)
if not stream:
result = await model_config.ainvoke(converted_messages)
return ChatCompletions(
choices=[Choice(message=convert_message_to_dict(result))]
)
else:
return (
ChatCompletionChunk(
choices=[ChoiceChunk(delta=_convert_message_chunk(c, i))]
)
async for i, c in aenumerate(model_config.astream(converted_messages))
)
class Chat:
def __init__(self) -> None:
self.completions = Completions()
chat = Chat()
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 516 | The task instance of MR cannot stop in some cases | The task instance of MR cannot stop in some cases | https://github.com/apache/dolphinscheduler/issues/516 | https://github.com/apache/dolphinscheduler/pull/560 | f880a76d7a3a95d4b425a16853456025292b8c03 | d3e59773085cf287e908a789b58a14285a57da43 | "2019-07-04T04:51:57Z" | java | "2019-07-10T09:21:12Z" | escheduler-server/src/main/java/cn/escheduler/server/utils/LoggerUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cn.escheduler.server.utils;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* logger utils
*/
public class LoggerUtils {
/**
* rules for extracting application ID
*/
private static final Pattern APPLICATION_REGEX = Pattern.compile("\\d+_\\d+");
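    // Note: this pattern matches any "<digits>_<digits>" substring in the scanned log, not only
    // yarn application ids of the form "application_<timestamp>_<sequence>", so unrelated number
    // pairs in the log can be collected as application ids. Whether that contributes to the
    // "MR task instance cannot stop" report above is an assumption, not stated in the issue.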
/**
* build job id
* @param affix
* @param processDefId
* @param processInstId
* @param taskId
* @return
*/
public static String buildTaskId(String affix,
int processDefId,
int processInstId,
int taskId){
return String.format("%s_%s_%s_%s",affix,
processDefId,
processInstId,
taskId);
}
/**
* processing log
* get yarn application id list
* @param log
* @param logger
* @return
*/
public static List<String> getAppIds(String log, Logger logger) {
List<String> appIds = new ArrayList<String>();
Matcher matcher = APPLICATION_REGEX.matcher(log);
// analyse logs to get all submit yarn application id
while (matcher.find()) {
String appId = matcher.group();
if(!appIds.contains(appId)){
logger.info("find app id: {}", appId);
appIds.add(appId);
}
}
return appIds;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,399 | [BUG] The wrong field order in logger.info | **Describe the bug**
The method onlineCreateResource in the class ResourcesController: the logger.info call has the wrong field order for fileName and type.
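For reference, a corrected call would simply pass the arguments in the same order as the placeholders in the message (a sketch; the actual fix in the linked PR may differ):

```java
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {}, desc : {}, content : {}",
        loginUser.getUserName(), fileName, type, fileSuffix, description, content);
```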
**Screenshots**
<img width="1021" alt="" src="https://user-images.githubusercontent.com/3699743/70215243-0f64cc80-1778-11ea-9ec5-f6a38559b051.png">
**Which version of Dolphin Scheduler:**
- [dev-db branch]
| https://github.com/apache/dolphinscheduler/issues/1399 | https://github.com/apache/dolphinscheduler/pull/1400 | b3a0e1fd148c876d61bce92fea14528c3fb2cf45 | f134453bb838c76f046fc450ef15afccd857074e | "2019-12-05T07:59:57Z" | java | "2019-12-05T09:04:54Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.controller;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ResourcesService;
import org.apache.dolphinscheduler.api.service.UdfFuncService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiImplicitParam;
import io.swagger.annotations.ApiImplicitParams;
import io.swagger.annotations.ApiOperation;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.core.io.Resource;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import springfox.documentation.annotations.ApiIgnore;
import java.util.Map;
import static org.apache.dolphinscheduler.api.enums.Status.*;
/**
* resources controller
*/
@Api(tags = "RESOURCES_TAG", position = 1)
@RestController
@RequestMapping("resources")
public class ResourcesController extends BaseController{
private static final Logger logger = LoggerFactory.getLogger(ResourcesController.class);
@Autowired
private ResourcesService resourceService;
@Autowired
private UdfFuncService udfFuncService;
/**
* create resource
*
* @param loginUser login user
* @param alias alias
* @param description description
* @param type type
* @param file file
* @return create result code
*/
@ApiOperation(value = "createResource", notes= "CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
})
@PostMapping(value = "/create")
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name")String alias,
@RequestParam(value = "description", required = false) String description,
@RequestParam("file") MultipartFile file) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description, file.getName(), file.getOriginalFilename());
return resourceService.createResource(loginUser,alias, description,type ,file);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
}
}
/**
* update resource
*
* @param loginUser login user
* @param alias alias
* @param resourceId resource id
* @param type resource type
* @param description description
* @return update result code
*/
@ApiOperation(value = "updateResource", notes= "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true,dataType = "MultipartFile")
})
@PostMapping(value = "/update")
public Result updateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="id") int resourceId,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="name")String alias,
@RequestParam(value = "description", required = false) String description) {
try {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
loginUser.getUserName(),type, alias, description);
return resourceService.updateResource(loginUser,resourceId,alias, description,type);
} catch (Exception e) {
logger.error(UPDATE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UPDATE_RESOURCE_ERROR.getCode(), Status.UPDATE_RESOURCE_ERROR.getMsg());
}
}
/**
* query resources list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes= "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType")
})
@GetMapping(value="/list")
@ResponseStatus(HttpStatus.OK)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type
){
try{
logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = resourceService.queryResourceList(loginUser, type);
return returnDataList(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_ERROR.getMsg(),e);
return error(Status.QUERY_RESOURCES_LIST_ERROR.getCode(), Status.QUERY_RESOURCES_LIST_ERROR.getMsg());
}
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
@ApiOperation(value = "queryResourceListPaging", notes= "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
})
@GetMapping(value="/list-paging")
@ResponseStatus(HttpStatus.OK)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
){
try{
logger.info("query resource list, login user:{}, resource type:{}, search value:{}",
loginUser.getUserName(), type.toString(), searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
searchVal = ParameterUtils.handleEscapes(searchVal);
result = resourceService.queryResourceListPaging(loginUser,type,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_PAGING.getMsg(),e);
return error(Status.QUERY_RESOURCES_LIST_PAGING.getCode(), Status.QUERY_RESOURCES_LIST_PAGING.getMsg());
}
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
*/
@ApiOperation(value = "deleteResource", notes= "DELETE_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/delete")
@ResponseStatus(HttpStatus.OK)
public Result deleteResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="id") int resourceId
) {
try{
logger.info("login user {}, delete resource id: {}",
loginUser.getUserName(),resourceId);
return resourceService.delete(loginUser,resourceId);
}catch (Exception e){
logger.error(DELETE_RESOURCE_ERROR.getMsg(),e);
return error(Status.DELETE_RESOURCE_ERROR.getCode(), Status.DELETE_RESOURCE_ERROR.getMsg());
}
}
/**
* verify resource by alias and type
*
* @param loginUser login user
* @param alias resource name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes= "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="name") String alias,
@RequestParam(value ="type") ResourceType type
) {
try {
            logger.info("login user {}, verify resource alias: {}, resource type: {}",
loginUser.getUserName(), alias,type);
return resourceService.verifyResourceName(alias,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());
}
}
/**
* view resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
@ApiOperation(value = "viewResource", notes= "VIEW_RESOURCE_BY_ID_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "skipLineNum", value = "SKIP_LINE_NUM", required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/view")
public Result viewResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "skipLineNum") int skipLineNum,
@RequestParam(value = "limit") int limit
) {
try{
logger.info("login user {}, view resource : {}, skipLineNum {} , limit {}",
loginUser.getUserName(),resourceId,skipLineNum,limit);
return resourceService.readResource(resourceId,skipLineNum,limit);
}catch (Exception e){
logger.error(VIEW_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.VIEW_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.VIEW_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
}
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param description description
* @param content content
* @return create result code
*/
@ApiOperation(value = "onlineCreateResource", notes= "ONLINE_CREATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
@ApiImplicitParam(name = "fileName", value = "RESOURCE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "suffix", value = "SUFFIX", required = true, dataType ="String"),
@ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
@ApiImplicitParam(name = "content", value = "CONTENT",required = true, dataType ="String")
})
@PostMapping(value = "/online-create")
public Result onlineCreateResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
@RequestParam(value ="fileName")String fileName,
@RequestParam(value ="suffix")String fileSuffix,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "content") String content
) {
try{
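            // Note (issue #1399 above): the message text lists fileName before type, but the
            // arguments below pass type before fileName, so the two values are logged in
            // swapped positions.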
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
loginUser.getUserName(),type,fileName,fileSuffix,description,content);
if(StringUtils.isEmpty(content)){
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content);
}catch (Exception e){
logger.error(CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
}
}
/**
* edit resource file online
*
* @param loginUser login user
* @param resourceId resource id
* @param content content
* @return update result code
*/
@ApiOperation(value = "updateResourceContent", notes= "UPDATE_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100"),
@ApiImplicitParam(name = "content", value = "CONTENT",required = true, dataType ="String")
})
@PostMapping(value = "/update-content")
public Result updateResourceContent(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId,
@RequestParam(value = "content") String content
) {
try{
logger.info("login user {}, updateProcessInstance resource : {}",
loginUser.getUserName(),resourceId);
if(StringUtils.isEmpty(content)){
logger.error("The resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
return resourceService.updateResourceContent(resourceId,content);
}catch (Exception e){
logger.error(EDIT_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.EDIT_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.EDIT_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
}
}
/**
* download resource file
*
* @param loginUser login user
* @param resourceId resource id
* @return resource content
*/
@ApiOperation(value = "downloadResource", notes= "DOWNLOAD_RESOURCE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/download")
@ResponseBody
public ResponseEntity downloadResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int resourceId) {
try{
logger.info("login user {}, download resource : {}",
loginUser.getUserName(), resourceId);
Resource file = resourceService.downloadResource(resourceId);
if (file == null) {
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.RESOURCE_NOT_EXIST.getMsg());
}
return ResponseEntity
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
}catch (Exception e){
logger.error(DOWNLOAD_RESOURCE_FILE_ERROR.getMsg(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.DOWNLOAD_RESOURCE_FILE_ERROR.getMsg());
}
}
/**
* create udf function
* @param loginUser login user
* @param type udf type
* @param funcName function name
* @param argTypes argument types
* @param database database
* @param description description
* @param className class name
* @param resourceId resource id
* @return create result code
*/
@ApiOperation(value = "createUdfFunc", notes= "CREATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType ="UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType ="String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType ="String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType ="String"),
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@PostMapping(value = "/udf-func/create")
@ResponseStatus(HttpStatus.CREATED)
public Result createUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") UdfType type,
@RequestParam(value ="funcName")String funcName,
@RequestParam(value ="className")String className,
@RequestParam(value ="argTypes", required = false)String argTypes,
@RequestParam(value ="database", required = false)String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
logger.info("login user {}, create udf function, type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(),type, funcName, argTypes,database,description, resourceId);
Result result = new Result();
try {
return udfFuncService.createUdfFunction(loginUser,funcName,className,argTypes,database,description,type,resourceId);
} catch (Exception e) {
logger.error(CREATE_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.CREATE_UDF_FUNCTION_ERROR.getCode(), Status.CREATE_UDF_FUNCTION_ERROR.getMsg());
}
}
/**
* view udf function
*
* @param loginUser login user
* @param id resource id
* @return udf function detail
*/
@ApiOperation(value = "viewUIUdfFunction", notes= "VIEW_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "resourceId", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/udf-func/update-ui")
@ResponseStatus(HttpStatus.OK)
public Result viewUIUdfFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("id") int id) {
logger.info("login user {}, query udf{}",
loginUser.getUserName(), id);
try {
Map<String, Object> map = udfFuncService.queryUdfFuncDetail(id);
return returnDataList(map);
} catch (Exception e) {
logger.error(VIEW_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.VIEW_UDF_FUNCTION_ERROR.getCode(), Status.VIEW_UDF_FUNCTION_ERROR.getMsg());
}
}
/**
* update udf function
*
* @param loginUser login user
* @param type resource type
* @param funcName function name
* @param argTypes argument types
     * @param database database
* @param description description
* @param resourceId resource id
* @param className class name
* @param udfFuncId udf function id
* @return update result code
*/
@ApiOperation(value = "updateUdfFunc", notes= "UPDATE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType ="UdfType"),
@ApiImplicitParam(name = "funcName", value = "FUNC_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "suffix", value = "CLASS_NAME", required = true, dataType ="String"),
@ApiImplicitParam(name = "argTypes", value = "ARG_TYPES", dataType ="String"),
@ApiImplicitParam(name = "database", value = "DATABASE_NAME", dataType ="String"),
@ApiImplicitParam(name = "description", value = "UDF_DESC", dataType ="String"),
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@PostMapping(value = "/udf-func/update")
public Result updateUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "id") int udfFuncId,
@RequestParam(value = "type") UdfType type,
@RequestParam(value ="funcName")String funcName,
@RequestParam(value ="className")String className,
@RequestParam(value ="argTypes", required = false)String argTypes,
@RequestParam(value ="database", required = false)String database,
@RequestParam(value = "description", required = false) String description,
@RequestParam(value = "resourceId") int resourceId) {
try {
logger.info("login user {}, updateProcessInstance udf function id: {},type: {}, funcName: {},argTypes: {} ,database: {},desc: {},resourceId: {}",
loginUser.getUserName(),udfFuncId,type, funcName, argTypes,database,description, resourceId);
Map<String, Object> result = udfFuncService.updateUdfFunc(udfFuncId,funcName,className,argTypes,database,description,type,resourceId);
return returnDataList(result);
} catch (Exception e) {
logger.error(UPDATE_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.UPDATE_UDF_FUNCTION_ERROR.getCode(), Status.UPDATE_UDF_FUNCTION_ERROR.getMsg());
}
}
/**
* query udf function list paging
*
* @param loginUser login user
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return udf function list page
*/
@ApiOperation(value = "queryUdfFuncListPaging", notes= "QUERY_UDF_FUNCTION_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
})
@GetMapping(value="/udf-func/list-paging")
@ResponseStatus(HttpStatus.OK)
public Result queryUdfFuncList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
){
try{
logger.info("query udf functions list, login user:{},search value:{}",
loginUser.getUserName(), searchVal);
Map<String, Object> result = checkPageParams(pageNo, pageSize);
if(result.get(Constants.STATUS) != Status.SUCCESS){
return returnDataListPaging(result);
}
result = udfFuncService.queryUdfFuncListPaging(loginUser,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_UDF_FUNCTION_LIST_PAGING_ERROR.getMsg(),e);
return error(Status.QUERY_UDF_FUNCTION_LIST_PAGING_ERROR.getCode(), Status.QUERY_UDF_FUNCTION_LIST_PAGING_ERROR.getMsg());
}
}
/**
* query resource list by type
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
@ApiOperation(value = "queryResourceList", notes= "QUERY_RESOURCE_LIST_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "UDF_TYPE", required = true, dataType ="UdfType")
})
@GetMapping(value="/udf-func/list")
@ResponseStatus(HttpStatus.OK)
public Result queryResourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("type") UdfType type){
try{
logger.info("query datasource list, user:{}, type:{}", loginUser.getUserName(), type.toString());
Map<String, Object> result = udfFuncService.queryResourceList(loginUser,type.ordinal());
return returnDataList(result);
}catch (Exception e){
logger.error(QUERY_DATASOURCE_BY_TYPE_ERROR.getMsg(),e);
return error(Status.QUERY_DATASOURCE_BY_TYPE_ERROR.getCode(),QUERY_DATASOURCE_BY_TYPE_ERROR.getMsg());
}
}
/**
* verify udf function name can use or not
*
* @param loginUser login user
* @param name name
     * @return true if the name can be used, otherwise false
*/
@ApiOperation(value = "verifyUdfFuncName", notes= "VERIFY_UDF_FUNCTION_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "name", value = "FUNC_NAME",required = true, dataType ="String")
})
@GetMapping(value = "/udf-func/verify-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyUdfFuncName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="name") String name
) {
logger.info("login user {}, verfiy udf function name: {}",
loginUser.getUserName(),name);
try{
return udfFuncService.verifyUdfFuncByName(name);
}catch (Exception e){
logger.error(VERIFY_UDF_FUNCTION_NAME_ERROR.getMsg(),e);
return error(Status.VERIFY_UDF_FUNCTION_NAME_ERROR.getCode(), Status.VERIFY_UDF_FUNCTION_NAME_ERROR.getMsg());
}
}
/**
* delete udf function
*
* @param loginUser login user
* @param udfFuncId udf function id
* @return delete result code
*/
@ApiOperation(value = "deleteUdfFunc", notes= "DELETE_UDF_FUNCTION_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/udf-func/delete")
@ResponseStatus(HttpStatus.OK)
public Result deleteUdfFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="id") int udfFuncId
) {
try{
logger.info("login user {}, delete udf function id: {}", loginUser.getUserName(),udfFuncId);
return udfFuncService.delete(udfFuncId);
}catch (Exception e){
logger.error(DELETE_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.DELETE_UDF_FUNCTION_ERROR.getCode(), Status.DELETE_UDF_FUNCTION_ERROR.getMsg());
}
}
/**
* authorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
@ApiOperation(value = "authorizedFile", notes= "AUTHORIZED_FILE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/authed-file")
@ResponseStatus(HttpStatus.CREATED)
public Result authorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("authorized file resource, user: {}, user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedFile(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(AUTHORIZED_FILE_RESOURCE_ERROR.getMsg(),e);
return error(Status.AUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.AUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
}
}
/**
* unauthorized file resource list
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthorizedFile", notes= "UNAUTHORIZED_FILE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/unauth-file")
@ResponseStatus(HttpStatus.CREATED)
public Result unauthorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("resource unauthorized file, user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedFile(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
}
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
@ApiOperation(value = "unauthUDFFunc", notes= "UNAUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/unauth-udf-func")
@ResponseStatus(HttpStatus.CREATED)
public Result unauthUDFFunc(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("unauthorized udf function, login user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.unauthorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(UNAUTHORIZED_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.UNAUTHORIZED_UDF_FUNCTION_ERROR.getCode(), Status.UNAUTHORIZED_UDF_FUNCTION_ERROR.getMsg());
}
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
@ApiOperation(value = "authUDFFunc", notes= "AUTHORIZED_UDF_FUNC_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
@GetMapping(value = "/authed-udf-func")
@ResponseStatus(HttpStatus.CREATED)
public Result authorizedUDFFunction(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
logger.info("auth udf function, login user:{}, auth user id:{}", loginUser.getUserName(), userId);
Map<String, Object> result = resourceService.authorizedUDFFunction(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
logger.error(AUTHORIZED_UDF_FUNCTION_ERROR.getMsg(),e);
return error(Status.AUTHORIZED_UDF_FUNCTION_ERROR.getCode(), Status.AUTHORIZED_UDF_FUNCTION_ERROR.getMsg());
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,441 | [Feature] add user error when user name contains '.' | **Is your feature request related to a problem? Please describe.**
Adding a user fails with an error when the user name contains '.'.
I think the username regex string may be wrong.
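For reference, the current pattern in Constants.java is `^[a-zA-Z0-9_-]{3,20}$`, which has no `.` in its character class. Below is a minimal sketch of a relaxed pattern that also accepts dots; the exact allowed character set, the class name, and the sample user name are illustrative assumptions, not the project's decision:

```java
import java.util.regex.Pattern;

public class UserNameRegexSketch {
    // Hypothetical relaxed pattern: additionally allows '.' in a 3-20 character user name.
    private static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,20}$");

    public static void main(String[] args) {
        // "john.doe" is rejected by the current pattern but accepted by this sketch.
        System.out.println(REGEX_USER_NAME.matcher("john.doe").matches()); // prints: true
    }
}
```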
| https://github.com/apache/dolphinscheduler/issues/1441 | https://github.com/apache/dolphinscheduler/pull/1445 | b276ece5727bdf7cecc07bfc888f27cb75726b65 | a1ded5ee041a387d56757fcdd27e39efd3a6000a | "2019-12-11T03:07:47Z" | java | "2019-12-12T06:19:55Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import java.util.regex.Pattern;
/**
* Constants
*/
public final class Constants {
/**
* zookeeper properties path
*/
public static final String ZOOKEEPER_PROPERTIES_PATH = "zookeeper.properties";
/**
* hadoop properties path
*/
public static final String HADOOP_PROPERTIES_PATH = "/common/hadoop/hadoop.properties";
/**
* common properties path
*/
public static final String COMMON_PROPERTIES_PATH = "/common/common.properties";
/**
* dao properties path
*/
public static final String DAO_PROPERTIES_PATH = "application.properties";
/**
* fs.defaultFS
*/
public static final String FS_DEFAULTFS = "fs.defaultFS";
/**
* fs s3a endpoint
*/
public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint";
/**
* fs s3a access key
*/
public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key";
/**
* fs s3a secret key
*/
public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key";
/**
     * yarn.resourcemanager.ha.rm.ids
*/
public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids";
/**
* yarn.application.status.address
*/
public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address";
/**
* hdfs configuration
* hdfs.root.user
*/
public static final String HDFS_ROOT_USER = "hdfs.root.user";
/**
* hdfs configuration
* data.store2hdfs.basepath
*/
public static final String DATA_STORE_2_HDFS_BASEPATH = "data.store2hdfs.basepath";
/**
* data.basedir.path
*/
public static final String DATA_BASEDIR_PATH = "data.basedir.path";
/**
* data.download.basedir.path
*/
public static final String DATA_DOWNLOAD_BASEDIR_PATH = "data.download.basedir.path";
/**
* process.exec.basepath
*/
public static final String PROCESS_EXEC_BASEPATH = "process.exec.basepath";
/**
* dolphinscheduler.env.path
*/
public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path";
/**
* python home
*/
public static final String PYTHON_HOME="PYTHON_HOME";
/**
* resource.view.suffixs
*/
public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs";
/**
* development.state
*/
public static final String DEVELOPMENT_STATE = "development.state";
/**
* res.upload.startup.type
*/
public static final String RES_UPLOAD_STARTUP_TYPE = "res.upload.startup.type";
/**
* zookeeper quorum
*/
public static final String ZOOKEEPER_QUORUM = "zookeeper.quorum";
/**
* MasterServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_MASTERS = "zookeeper.dolphinscheduler.masters";
/**
* WorkerServer directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_WORKERS = "zookeeper.dolphinscheduler.workers";
/**
* all servers directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS = "zookeeper.dolphinscheduler.dead.servers";
/**
* MasterServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS = "zookeeper.dolphinscheduler.lock.masters";
/**
* WorkerServer lock directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_WORKERS = "zookeeper.dolphinscheduler.lock.workers";
/**
* MasterServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "zookeeper.dolphinscheduler.lock.failover.masters";
/**
* WorkerServer failover directory registered in zookeeper
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "zookeeper.dolphinscheduler.lock.failover.workers";
/**
     * MasterServer startup failover running and fault tolerance process
*/
public static final String ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "zookeeper.dolphinscheduler.lock.failover.startup.masters";
/**
* need send warn times when master server or worker server failover
*/
public static final int DOLPHINSCHEDULER_WARN_TIMES_FAILOVER = 3;
/**
* comma ,
*/
public static final String COMMA = ",";
/**
* COLON :
*/
public static final String COLON = ":";
/**
* SINGLE_SLASH /
*/
public static final String SINGLE_SLASH = "/";
/**
* DOUBLE_SLASH //
*/
public static final String DOUBLE_SLASH = "//";
/**
* SEMICOLON ;
*/
public static final String SEMICOLON = ";";
/**
* EQUAL SIGN
*/
public static final String EQUAL_SIGN = "=";
/**
* ZOOKEEPER_SESSION_TIMEOUT
*/
public static final String ZOOKEEPER_SESSION_TIMEOUT = "zookeeper.session.timeout";
public static final String ZOOKEEPER_CONNECTION_TIMEOUT = "zookeeper.connection.timeout";
public static final String ZOOKEEPER_RETRY_SLEEP = "zookeeper.retry.sleep";
public static final String ZOOKEEPER_RETRY_MAXTIME = "zookeeper.retry.maxtime";
public static final String MASTER_HEARTBEAT_INTERVAL = "master.heartbeat.interval";
public static final String MASTER_EXEC_THREADS = "master.exec.threads";
public static final String MASTER_EXEC_TASK_THREADS = "master.exec.task.number";
public static final String MASTER_COMMIT_RETRY_TIMES = "master.task.commit.retryTimes";
public static final String MASTER_COMMIT_RETRY_INTERVAL = "master.task.commit.interval";
public static final String WORKER_EXEC_THREADS = "worker.exec.threads";
public static final String WORKER_HEARTBEAT_INTERVAL = "worker.heartbeat.interval";
public static final String WORKER_FETCH_TASK_NUM = "worker.fetch.task.num";
public static final String WORKER_MAX_CPULOAD_AVG = "worker.max.cpuload.avg";
public static final String WORKER_RESERVED_MEMORY = "worker.reserved.memory";
public static final String MASTER_MAX_CPULOAD_AVG = "master.max.cpuload.avg";
public static final String MASTER_RESERVED_MEMORY = "master.reserved.memory";
/**
* dolphinscheduler tasks queue
*/
public static final String DOLPHINSCHEDULER_TASKS_QUEUE = "tasks_queue";
/**
* dolphinscheduler need kill tasks queue
*/
public static final String DOLPHINSCHEDULER_TASKS_KILL = "tasks_kill";
public static final String ZOOKEEPER_DOLPHINSCHEDULER_ROOT = "zookeeper.dolphinscheduler.root";
public static final String SCHEDULER_QUEUE_IMPL = "dolphinscheduler.queue.impl";
/**
* date format of yyyy-MM-dd HH:mm:ss
*/
public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss";
/**
* date format of yyyyMMddHHmmss
*/
public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss";
/**
* http connect time out
*/
public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000;
/**
* http connect request time out
*/
public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000;
/**
* httpclient soceket time out
*/
public static final int SOCKET_TIMEOUT = 60 * 1000;
/**
* http header
*/
public static final String HTTP_HEADER_UNKNOWN = "unKnown";
/**
* http X-Forwarded-For
*/
public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For";
/**
* http X-Real-IP
*/
public static final String HTTP_X_REAL_IP = "X-Real-IP";
/**
* UTF-8
*/
public static final String UTF_8 = "UTF-8";
/**
* user name regex
*/
public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9_-]{3,20}$");
/**
* email regex
*/
public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$");
/**
* read permission
*/
public static final int READ_PERMISSION = 2 * 1;
/**
* write permission
*/
public static final int WRITE_PERMISSION = 2 * 2;
/**
* execute permission
*/
public static final int EXECUTE_PERMISSION = 1;
/**
* default admin permission
*/
public static final int DEFAULT_ADMIN_PERMISSION = 7;
/**
* all permissions
*/
public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION;
/**
* max task timeout
*/
public static final int MAX_TASK_TIMEOUT = 24 * 3600;
/**
* heartbeat threads number
*/
public static final int defaulWorkerHeartbeatThreadNum = 1;
/**
* heartbeat interval
*/
public static final int defaultWorkerHeartbeatInterval = 60;
/**
* worker fetch task number
*/
public static final int defaultWorkerFetchTaskNum = 1;
/**
* worker execute threads number
*/
public static final int defaultWorkerExecThreadNum = 10;
/**
* master cpu load
*/
public static final int defaultMasterCpuLoad = Runtime.getRuntime().availableProcessors() * 2;
/**
* master reserved memory
*/
public static final double defaultMasterReservedMemory = OSUtils.totalMemorySize() / 10;
/**
* worker cpu load
*/
public static final int defaultWorkerCpuLoad = Runtime.getRuntime().availableProcessors() * 2;
/**
* worker reserved memory
*/
public static final double defaultWorkerReservedMemory = OSUtils.totalMemorySize() / 10;
/**
* master execute threads number
*/
public static final int defaultMasterExecThreadNum = 100;
/**
* default master concurrent task execute num
*/
public static final int defaultMasterTaskExecNum = 20;
/**
     * default log cache rows num, output when the number of rows is reached
*/
public static final int defaultLogRowsNum = 4 * 16;
/**
     * log flush interval, output when the interval is reached
*/
public static final int defaultLogFlushInterval = 1000;
/**
* default master heartbeat thread number
*/
public static final int defaulMasterHeartbeatThreadNum = 1;
/**
* default master heartbeat interval
*/
public static final int defaultMasterHeartbeatInterval = 60;
/**
* default master commit retry times
*/
public static final int defaultMasterCommitRetryTimes = 5;
/**
* default master commit retry interval
*/
public static final int defaultMasterCommitRetryInterval = 100;
/**
     * time unit second to minutes
*/
public static final int SEC_2_MINUTES_TIME_UNIT = 60;
/***
*
* rpc port
*/
public static final int RPC_PORT = 50051;
/**
* forbid running task
*/
public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN";
/**
* task record configuration path
*/
public static final String APPLICATION_PROPERTIES = "application-dao.properties";
public static final String TASK_RECORD_URL = "task.record.datasource.url";
public static final String TASK_RECORD_FLAG = "task.record.flag";
public static final String TASK_RECORD_USER = "task.record.datasource.username";
public static final String TASK_RECORD_PWD = "task.record.datasource.password";
public static final String DEFAULT = "Default";
public static final String USER = "user";
public static final String PASSWORD = "password";
public static final String XXXXXX = "******";
public static final String NULL = "NULL";
public static String TASK_RECORD_TABLE_HIVE_LOG = "eamp_hive_log_hd";
public static String TASK_RECORD_TABLE_HISTORY_HIVE_LOG = "eamp_hive_hist_log_hd";
/**
* command parameter keys
*/
public static final String CMDPARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId";
public static final String CMDPARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList";
public static final String CMDPARAM_RECOVERY_WAITTING_THREAD = "WaittingThreadInstanceId";
public static final String CMDPARAM_SUB_PROCESS = "processInstanceId";
public static final String CMDPARAM_EMPTY_SUB_PROCESS = "0";
public static final String CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId";
public static final String CMDPARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId";
public static final String CMDPARAM_START_NODE_NAMES = "StartNodeNameList";
/**
* complement data start date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate";
/**
* complement data end date
*/
public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate";
/**
* hadoop configuration
*/
public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE";
public static final String HADOOP_RM_STATE_STANDBY = "STANDBY";
public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port";
/**
* data source config
*/
public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name";
public static final String SPRING_DATASOURCE_URL = "spring.datasource.url";
public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username";
public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout";
public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize";
public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle";
public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive";
public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis";
public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis";
public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis";
public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery";
public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle";
public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow";
public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn";
public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements";
public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit";
public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive";
public static final String SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize";
public static final String DEVELOPMENT = "development";
public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties";
/**
* sleep time
*/
public static final int SLEEP_TIME_MILLIS = 1000;
/**
* heartbeat for zk info length
*/
public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 7;
/**
* hadoop params constant
*/
/**
* jar
*/
public static final String JAR = "jar";
/**
* hadoop
*/
public static final String HADOOP = "hadoop";
/**
* -D parameter
*/
public static final String D = "-D";
/**
* -D mapreduce.job.queuename=ququename
*/
public static final String MR_QUEUE = "mapreduce.job.queuename";
/**
* spark params constant
*/
public static final String MASTER = "--master";
public static final String DEPLOY_MODE = "--deploy-mode";
/**
* --class CLASS_NAME
*/
public static final String MAIN_CLASS = "--class";
/**
* --driver-cores NUM
*/
public static final String DRIVER_CORES = "--driver-cores";
/**
* --driver-memory MEM
*/
public static final String DRIVER_MEMORY = "--driver-memory";
/**
* --num-executors NUM
*/
public static final String NUM_EXECUTORS = "--num-executors";
/**
* --executor-cores NUM
*/
public static final String EXECUTOR_CORES = "--executor-cores";
/**
* --executor-memory MEM
*/
public static final String EXECUTOR_MEMORY = "--executor-memory";
/**
* --queue QUEUE
*/
public static final String SPARK_QUEUE = "--queue";
/**
* --queue --qu
*/
public static final String FLINK_QUEUE = "--qu";
/**
* exit code success
*/
public static final int EXIT_CODE_SUCCESS = 0;
/**
* exit code kill
*/
public static final int EXIT_CODE_KILL = 137;
/**
* exit code failure
*/
public static final int EXIT_CODE_FAILURE = -1;
/**
* date format of yyyyMMdd
*/
public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd";
/**
* date format of yyyyMMddHHmmss
*/
public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss";
/**
* system date(yyyyMMddHHmmss)
*/
public static final String PARAMETER_DATETIME = "system.datetime";
/**
* system date(yyyymmdd) today
*/
public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate";
/**
* system date(yyyymmdd) yesterday
*/
public static final String PARAMETER_BUSINESS_DATE = "system.biz.date";
/**
* ACCEPTED
*/
public static final String ACCEPTED = "ACCEPTED";
/**
* SUCCEEDED
*/
public static final String SUCCEEDED = "SUCCEEDED";
/**
* NEW
*/
public static final String NEW = "NEW";
/**
* NEW_SAVING
*/
public static final String NEW_SAVING = "NEW_SAVING";
/**
* SUBMITTED
*/
public static final String SUBMITTED = "SUBMITTED";
/**
* FAILED
*/
public static final String FAILED = "FAILED";
/**
* KILLED
*/
public static final String KILLED = "KILLED";
/**
* RUNNING
*/
public static final String RUNNING = "RUNNING";
/**
* underline "_"
*/
public static final String UNDERLINE = "_";
/**
     * quartz job prefix
*/
public static final String QUARTZ_JOB_PRIFIX = "job";
/**
     * quartz job group prefix
*/
public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup";
/**
* projectId
*/
public static final String PROJECT_ID = "projectId";
/**
* processId
*/
public static final String SCHEDULE_ID = "scheduleId";
/**
* schedule
*/
public static final String SCHEDULE = "schedule";
/**
* application regex
*/
public static final String APPLICATION_REGEX = "application_\\d+_\\d+";
public static final String PID = "pid";
/**
* month_begin
*/
public static final String MONTH_BEGIN = "month_begin";
/**
* add_months
*/
public static final String ADD_MONTHS = "add_months";
/**
* month_end
*/
public static final String MONTH_END = "month_end";
/**
* week_begin
*/
public static final String WEEK_BEGIN = "week_begin";
/**
* week_end
*/
public static final String WEEK_END = "week_end";
/**
* timestamp
*/
public static final String TIMESTAMP = "timestamp";
public static final char SUBTRACT_CHAR = '-';
public static final char ADD_CHAR = '+';
public static final char MULTIPLY_CHAR = '*';
public static final char DIVISION_CHAR = '/';
public static final char LEFT_BRACE_CHAR = '(';
public static final char RIGHT_BRACE_CHAR = ')';
public static final String ADD_STRING = "+";
public static final String MULTIPLY_STRING = "*";
public static final String DIVISION_STRING = "/";
public static final String LEFT_BRACE_STRING = "(";
public static final char P = 'P';
public static final char N = 'N';
public static final String SUBTRACT_STRING = "-";
public static final String GLOBAL_PARAMS = "globalParams";
public static final String LOCAL_PARAMS = "localParams";
public static final String PROCESS_INSTANCE_STATE = "processInstanceState";
public static final String TASK_LIST = "taskList";
public static final String RWXR_XR_X = "rwxr-xr-x";
/**
* master/worker server use for zk
*/
public static final String MASTER_PREFIX = "master";
public static final String WORKER_PREFIX = "worker";
public static final String DELETE_ZK_OP = "delete";
public static final String ADD_ZK_OP = "add";
public static final String ALIAS = "alias";
public static final String CONTENT = "content";
public static final String DEPENDENT_SPLIT = ":||";
public static final String DEPENDENT_ALL = "ALL";
/**
* preview schedule execute count
*/
public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5;
/**
* kerberos
*/
public static final String KERBEROS = "kerberos";
/**
* java.security.krb5.conf
*/
public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf";
/**
* java.security.krb5.conf.path
*/
public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
/**
* hadoop.security.authentication
*/
public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state";
/**
* loginUserFromKeytab user
*/
public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username";
/**
* default worker group id
*/
public static final int DEFAULT_WORKER_ID = -1;
/**
* loginUserFromKeytab path
*/
public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path";
/**
* task log info format
*/
public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s";
/**
* hive conf
*/
public static final String HIVE_CONF = "hiveconf:";
    // flink task constants
public static final String FLINK_YARN_CLUSTER = "yarn-cluster";
public static final String FLINK_RUN_MODE = "-m";
public static final String FLINK_YARN_SLOT = "-ys";
public static final String FLINK_APP_NAME = "-ynm";
public static final String FLINK_TASK_MANAGE = "-yn";
public static final String FLINK_JOB_MANAGE_MEM = "-yjm";
public static final String FLINK_TASK_MANAGE_MEM = "-ytm";
public static final String FLINK_detach = "-d";
public static final String FLINK_MAIN_CLASS = "-c";
public static final int[] NOT_TERMINATED_STATES = new int[]{
ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal(),
ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(),
ExecutionStatus.WAITTING_THREAD.ordinal(),
ExecutionStatus.WAITTING_DEPEND.ordinal()
};
/**
* status
*/
public static final String STATUS = "status";
/**
* message
*/
public static final String MSG = "msg";
/**
* data total
     * total number of records
*/
public static final String COUNT = "count";
/**
* page size
     * number of records per page
*/
public static final String PAGE_SIZE = "pageSize";
/**
* current page no
     * current page number
*/
public static final String PAGE_NUMBER = "pageNo";
/**
* result
*/
public static final String RESULT = "result";
/**
*
*/
public static final String DATA_LIST = "data";
public static final String TOTAL_LIST = "totalList";
public static final String CURRENT_PAGE = "currentPage";
public static final String TOTAL_PAGE = "totalPage";
public static final String TOTAL = "total";
/**
* session user
*/
public static final String SESSION_USER = "session.user";
public static final String SESSION_ID = "sessionId";
public static final String PASSWORD_DEFAULT = "******";
/**
* driver
*/
public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver";
public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver";
public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver";
public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver";
public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver";
public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver";
/**
* database type
*/
public static final String MYSQL = "MYSQL";
public static final String POSTGRESQL = "POSTGRESQL";
public static final String HIVE = "HIVE";
public static final String SPARK = "SPARK";
public static final String CLICKHOUSE = "CLICKHOUSE";
public static final String ORACLE = "ORACLE";
public static final String SQLSERVER = "SQLSERVER";
public static final String DB2 = "DB2";
/**
* jdbc url
*/
public static final String JDBC_MYSQL = "jdbc:mysql://";
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
public static final String JDBC_ORACLE = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
public static final String ADDRESS = "address";
public static final String DATABASE = "database";
public static final String JDBC_URL = "jdbcUrl";
public static final String PRINCIPAL = "principal";
public static final String OTHER = "other";
/**
* session timeout
*/
public static final int SESSION_TIME_OUT = 7200;
public static final int maxFileSize = 1024 * 1024 * 1024;
public static final String UDF = "UDF";
public static final String CLASS = "class";
public static final String RECEIVERS = "receivers";
public static final String RECEIVERS_CC = "receiversCc";
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,442 | [BUG] code comment error |
**Describe the bug**
In the class org.apache.dolphinscheduler.server.master.runner.MasterSchedulerThread, at line 104, the lock path obtained from zkMasterClient.getMasterLockPath() should be /dolphinscheduler/lock/masters, but the comment says it is /dolphinscheduler/lock/failover/master. I think this can easily mislead people who read the source code. Please check it; if the comment is indeed wrong, I can open a PR to fix it.
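For context, here is a minimal, self-contained sketch of how a master acquires that distributed lock with Curator, using the lock path reported above; the ZooKeeper connection string, retry policy, and class name are placeholder assumptions for illustration only:

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class MasterLockSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection string and retry policy.
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        // Lock path as reported in this issue.
        String znodeLock = "/dolphinscheduler/lock/masters";
        InterProcessMutex mutex = new InterProcessMutex(client, znodeLock);
        mutex.acquire();
        try {
            // Critical section: only one master scans the command table at a time.
        } finally {
            mutex.release();
            client.close();
        }
    }
}
```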
**To Reproduce**
None
**Expected behavior**
None
**Screenshots**
None
**Which version of Dolphin Scheduler:**
latest version
**Additional context**
None
**Requirement or improvement**
None
| https://github.com/apache/dolphinscheduler/issues/1442 | https://github.com/apache/dolphinscheduler/pull/1446 | a1ded5ee041a387d56757fcdd27e39efd3a6000a | 9a3a6f5a3d5ec613b49ae1a5a947d58c351f1fc7 | "2019-12-11T03:47:22Z" | java | "2019-12-12T06:20:27Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import org.apache.curator.framework.imps.CuratorFrameworkState;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.common.zk.AbstractZKClient;
import org.apache.dolphinscheduler.dao.ProcessDao;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.SpringApplicationContext;
import org.apache.dolphinscheduler.server.zk.ZKMasterClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
/**
* master scheduler thread
*/
public class MasterSchedulerThread implements Runnable {
/**
* logger of MasterSchedulerThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterSchedulerThread.class);
/**
* master exec service
*/
private final ExecutorService masterExecService;
/**
* dolphinscheduler database interface
*/
private final ProcessDao processDao;
/**
* zookeeper master client
*/
private final ZKMasterClient zkMasterClient ;
/**
* master exec thread num
*/
private int masterExecThreadNum;
/**
* master config
*/
private MasterConfig masterConfig;
/**
* constructor of MasterSchedulerThread
* @param zkClient zookeeper master client
* @param processDao process dao
* @param masterExecThreadNum master exec thread num
*/
public MasterSchedulerThread(ZKMasterClient zkClient, ProcessDao processDao, int masterExecThreadNum){
this.processDao = processDao;
this.zkMasterClient = zkClient;
this.masterExecThreadNum = masterExecThreadNum;
this.masterExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Exec-Thread",masterExecThreadNum);
this.masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
}
/**
* run of MasterSchedulerThread
*/
@Override
public void run() {
while (Stopper.isRunning()){
// process instance
ProcessInstance processInstance = null;
InterProcessMutex mutex = null;
try {
if(OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory())){
if (zkMasterClient.getZkClient().getState() == CuratorFrameworkState.STARTED) {
// create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/master
String znodeLock = zkMasterClient.getMasterLockPath();
mutex = new InterProcessMutex(zkMasterClient.getZkClient(), znodeLock);
mutex.acquire();
ThreadPoolExecutor poolExecutor = (ThreadPoolExecutor) masterExecService;
int activeCount = poolExecutor.getActiveCount();
// make sure to scan and delete command table in one transaction
Command command = processDao.findOneCommand();
if (command != null) {
logger.info(String.format("find one command: id: %d, type: %s", command.getId(),command.getCommandType().toString()));
try{
processInstance = processDao.handleCommand(logger, OSUtils.getHost(), this.masterExecThreadNum - activeCount, command);
if (processInstance != null) {
logger.info("start master exec thread , split DAG ...");
masterExecService.execute(new MasterExecThread(processInstance,processDao));
}
}catch (Exception e){
logger.error("scan command error ", e);
processDao.moveToErrorCommand(command, e.toString());
}
}
}
}
// accessing the command table every SLEEP_TIME_MILLIS milliseconds
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}catch (Exception e){
logger.error("master scheduler thread exception : " + e.getMessage(),e);
}finally{
AbstractZKClient.releaseMutex(mutex);
}
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,968 | [BUG] The task details window did not close properly. |
**Describe the bug**
The task details window did not close properly.
**To Reproduce**
1. Go to the definition create or definition edit page.
2. Drag to create a new task node or edit an existing node. The task details window will open.
3. Delete the task node or click the DataSource menu.
4. See error. The task details window is still open.
**Which version of Dolphin Scheduler:**
-[dev]
**Requirement or improvement**
The task details window should be closed. | https://github.com/apache/dolphinscheduler/issues/1968 | https://github.com/apache/dolphinscheduler/pull/1969 | d7d7e95498cd69132660c7287052bb328a76f4ec | afd5c75cd27c9fe1a7efcafbed92bf28fc3996e5 | "2020-02-17T03:36:53Z" | java | "2020-02-17T11:09:10Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import _ from 'lodash'
import { jsPlumb } from 'jsplumb'
import JSP from './plugIn/jsPlumbHandle'
import DownChart from './plugIn/downChart'
import store from '@/conf/home/store'
/**
* Prototype method
*/
let Dag = function () {
this.dag = {}
this.instance = {}
}
/**
* init
* @dag dag vue instance
*/
Dag.prototype.init = function ({ dag, instance }) {
this.dag = dag
this.instance = instance
}
/**
* set init config
*/
Dag.prototype.setConfig = function (o) {
JSP.setConfig(o)
}
/**
* create dag
*/
Dag.prototype.create = function () {
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance
})
// init event
JSP.handleEvent()
// init draggable
JSP.draggable()
})
}
/**
* Action event on the right side of the toolbar
*/
Dag.prototype.toolbarEvent = function ({ item, code, is }) {
switch (code) {
case 'pointer':
JSP.handleEventPointer(is)
break
case 'line':
JSP.handleEventLine(is)
break
case 'remove':
JSP.handleEventRemove()
break
case 'screen':
JSP.handleEventScreen({ item, is })
break
case 'download':
DownChart.download({
dagThis: this.dag
})
break
}
}
/**
* Echo data display
*/
Dag.prototype.backfill = function (arg) {
if(arg) {
let locationsValue = store.state.dag.locations
let locationsValue1 = store.state.dag.locations
let locationsValue2 = store.state.dag.locations
let arr = []
for (let i in locationsValue1) {
let objs = new Object();
objs.id = i
arr.push(Object.assign(objs,locationsValue1[i])); //Attributes
}
let tmp = []
for(let i in locationsValue2) {
if(locationsValue2[i].targetarr !='' && locationsValue2[i].targetarr.split(',').length>1) {
tmp.push(locationsValue2[i])
}
}
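      // Shallow copy of the node array (the node objects themselves remain shared references)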
function copy (array) {
let newArray = []
for(let item of array) {
newArray.push(item);
}
return newArray;
}
let newArr = copy(arr)
function getNewArr() {
for(let i= 0; i<newArr.length; i++) {
if(newArr[i].targetarr !='' && newArr[i].targetarr.split(',').length>1) {
newArr[i].targetarr = newArr[i].targetarr.split(',').shift()
}
}
return newArr
}
getNewArr()
/**
* @description Transform flat data into a tree structure
* @param {Array} arr Flat data
* @param {String} pidStr targetarr key name
* @param {String} idStr id key name
* @param {String} childrenStr children key name
*/
function fommat({arrayList, pidStr = 'targetarr', idStr = 'id', childrenStr = 'children'}) {
let listOjb = {}; // Used to store objects of the form {key: obj}
let treeList = []; // An array to store the final tree structure data
// Transform the data into {key: obj} format, which is convenient for the following data processing
for (let i = 0; i < arrayList.length; i++) {
listOjb[arrayList[i][idStr]] = arrayList[i]
}
// Format data based on pid
for (let j = 0; j < arrayList.length; j++) {
// Determine if the parent exists
// let haveParent = arrayList[j].targetarr.split(',').length>1?listOjb[arrayList[j].targetarr.split(',')[0]]:listOjb[arrayList[j][pidStr]]
let haveParent = listOjb[arrayList[j][pidStr]]
if (haveParent) {
// If there is no parent children field, create a children field
!haveParent[childrenStr] && (haveParent[childrenStr] = [])
// Insert child in parent
haveParent[childrenStr].push(arrayList[j])
} else {
// If there is no parent, insert directly into the outermost layer
treeList.push(arrayList[j])
}
}
return treeList
}
let datas = fommat({arrayList: newArr,pidStr: 'targetarr'})
// Count the number of leaf nodes
function getLeafCountTree(json) {
if(!json.children) {
json.colspan = 1;
return 1;
} else {
let leafCount = 0;
for(let i = 0 ; i < json.children.length ; i++){
leafCount = leafCount + getLeafCountTree(json.children[i]);
}
json.colspan = leafCount;
return leafCount;
}
}
// Number of tree node levels
let countTree = getLeafCountTree(datas[0])
function getMaxFloor(treeData) {
let max = 0
function each (data, floor) {
data.forEach(e => {
e.floor = floor
e.x=floor*170
if (floor > max) {
max = floor
}
if (e.children) {
each(e.children, floor + 1)
}
})
}
each(treeData,1)
return max
}
getMaxFloor(datas)
// The last child of each node
let lastchildren = [];
forxh(datas);
function forxh(list) {
for (let i = 0; i < list.length; i++) {
let chlist = list[i];
if (chlist.children) {
forxh(chlist.children);
} else {
lastchildren.push(chlist);
}
}
}
// Get all parent nodes above the leaf node
function treeFindPath (tree, func, path,n) {
if (!tree) return []
for (const data of tree) {
path.push(data.name)
if (func(data)) return path
if (data.children) {
const findChildren = treeFindPath(data.children, func, path,n)
if (findChildren.length) return findChildren
}
path.pop()
}
return []
}
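      // Flatten the node tree back into a plain list of {id, name, targetarr, x, y} records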
function toLine(data){
return data.reduce((arrData, {id, name, targetarr, x, y, children = []}) =>
arrData.concat([{id, name, targetarr, x, y}], toLine(children)), [])
}
let listarr = toLine(datas);
let listarrs = toLine(datas)
let dataObject = {}
for(let i = 0; i<listarrs.length; i++) {
delete(listarrs[i].id)
}
for(let a = 0; a<listarr.length; a++) {
dataObject[listarr[a].id] = listarrs[a]
}
// Comparison function
function createComparisonFunction(propertyName) {
return function (object1,object2) {
let value1 = object1[propertyName];
let value2 = object2[propertyName];
if (value1 < value2) {
return -1;
} else if (value1 > value2) {
return 1;
} else {
return 0;
}
};
}
lastchildren = lastchildren.sort(createComparisonFunction('x'))
// Coordinate value of each leaf node
for(let a = 0; a<lastchildren.length; a++) {
dataObject[lastchildren[a].id].y = (a+1)*120
}
for(let i =0 ; i<lastchildren.length; i++) {
let node = treeFindPath(datas, data=> data.targetarr===lastchildren[i].targetarr,[],i+1)
for(let j = 0; j<node.length; j++) {
for(let k= 0; k<listarrs.length; k++) {
if(node[j] == listarrs[k].name) {
listarrs[k].y = (i+1)*120
}
}
}
}
for(let i = 0; i<tmp.length; i++) {
for(let objs in dataObject) {
if(tmp[i].name == dataObject[objs].name) {
dataObject[objs].targetarr = tmp[i].targetarr
}
}
}
for(let a = 0; a<lastchildren.length; a++) {
dataObject[lastchildren[a].id].y = (a+1)*120
}
if(countTree>1) {
dataObject[Object.keys(locationsValue1)[0]].y = (countTree/2)*120+50
}
locationsValue = dataObject
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance
})
// Backfill
JSP.jspBackfill({
// connects
connects: _.cloneDeep(store.state.dag.connects),
// Node location information
locations: _.cloneDeep(locationsValue),
// Node data
largeJson: _.cloneDeep(store.state.dag.tasks)
})
})
} else {
jsPlumb.ready(() => {
JSP.init({
dag: this.dag,
instance: this.instance
})
// Backfill
JSP.jspBackfill({
// connects
connects: _.cloneDeep(store.state.dag.connects),
// Node location information
locations: _.cloneDeep(store.state.dag.locations),
// Node data
largeJson: _.cloneDeep(store.state.dag.tasks)
})
})
}
}
/**
* Get dag storage format data
*/
Dag.prototype.saveStore = function () {
return JSP.saveStore()
}
export default new Dag()
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,968 | [BUG] The task details window did not close properly. |
**Describe the bug**
The task details window did not close properly.
**To Reproduce**
1. Go to the definition create or definition edit page.
2. Drag to create a new task node or edit an existing node. The task details window will open.
3. Delete the task node or click the DataSource menu.
4. See error. The task details window is still open.
**Which version of Dolphin Scheduler:**
-[dev]
**Requirement or improvement**
The task details window should be closed. | https://github.com/apache/dolphinscheduler/issues/1968 | https://github.com/apache/dolphinscheduler/pull/1969 | d7d7e95498cd69132660c7287052bb328a76f4ec | afd5c75cd27c9fe1a7efcafbed92bf28fc3996e5 | "2020-02-17T03:36:53Z" | java | "2020-02-17T11:09:10Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/dag.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="clearfix dag-model" >
<div class="toolbar">
<div class="title"><span>{{$t('Toolbar')}}</span></div>
<div class="toolbar-btn">
<div class="bar-box roundedRect jtk-draggable jtk-droppable jtk-endpoint-anchor jtk-connected"
:class="v === dagBarId ? 'active' : ''"
:id="v"
:key="v"
v-for="(item,v) in tasksTypeList"
@mousedown="_getDagId(v)">
<div data-toggle="tooltip" :title="item.description">
<div class="icos" :class="'icos-' + v" ></div>
</div>
</div>
</div>
</div>
<div class="dag-contect">
<div class="dag-toolbar">
<div class="assist-btn">
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('View variables')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleView"
icon="ans-icon-code">
</x-button>
<x-button
style="vertical-align: middle;"
data-toggle="tooltip"
:title="$t('Startup parameter')"
data-container="body"
type="primary"
size="xsmall"
:disabled="$route.name !== 'projects-instance-details'"
@click="_toggleParam"
icon="ans-icon-arrow-circle-right">
</x-button>
<span class="name">{{name}}</span>
<span v-if="name" class="copy-name" @click="_copyName" :data-clipboard-text="name"><em class="ans-icon-copy" data-container="body" data-toggle="tooltip" :title="$t('Copy name')" ></em></span>
</div>
<div class="save-btn">
<div class="operation" style="vertical-align: middle;">
<a href="javascript:"
v-for="(item,$index) in toolOperList"
:class="_operationClass(item)"
:id="item.code"
:key="$index"
@click="_ckOperation(item,$event)">
<x-button type="text" data-container="body" :icon="item.icon" v-tooltip.light="item.desc"></x-button>
</a>
</div>
<x-button
type="primary"
v-tooltip.light="$t('Format DAG')"
icon="ans-icon-triangle-solid-right"
size="xsmall"
data-container="body"
v-if="type === 'instance'"
style="vertical-align: middle;"
@click="dagAutomaticLayout">
</x-button>
<x-button
v-tooltip.light="$t('Refresh DAG status')"
data-container="body"
style="vertical-align: middle;"
icon="ans-icon-refresh"
type="primary"
:loading="isRefresh"
v-if="type === 'instance'"
@click="!isRefresh && _refresh()"
size="xsmall" >
</x-button>
<x-button
v-if="isRtTasks"
style="vertical-align: middle;"
type="primary"
size="xsmall"
icon="ans-icon-play"
@click="_rtNodesDag" >
{{$t('Return_1')}}
</x-button>
<x-button
style="vertical-align: middle;"
type="primary"
size="xsmall"
:loading="spinnerLoading"
@click="_saveChart"
icon="ans-icon-save"
>
{{spinnerLoading ? 'Loading...' : $t('Save')}}
</x-button>
</div>
</div>
<div class="scrollbar dag-container">
<div class="jtk-demo" id="jtk-demo">
<div class="jtk-demo-canvas canvas-wide statemachine-demo jtk-surface jtk-surface-nopan jtk-draggable" id="canvas" ></div>
</div>
</div>
</div>
</div>
</template>
<script>
import _ from 'lodash'
import Dag from './dag'
import mUdp from './udp/udp'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import Clipboard from 'clipboard'
import { allNodesId } from './plugIn/util'
import { toolOper, tasksType } from './config'
import mFormModel from './formModel/formModel'
import { formatDate } from '@/module/filter/filter'
import { findComponentDownward } from '@/module/util/'
import disabledState from '@/module/mixin/disabledState'
import { mapActions, mapState, mapMutations } from 'vuex'
let eventModel
export default {
name: 'dag-chart',
data () {
return {
tasksTypeList: tasksType,
toolOperList: toolOper(this),
dagBarId: null,
toolOperCode: '',
spinnerLoading: false,
urlParam: {
id: this.$route.params.id || null
},
isRtTasks: false,
isRefresh: false,
isLoading: false,
taskId: null,
arg: false,
}
},
mixins: [disabledState],
props: {
type: String,
releaseState: String
},
methods: {
...mapActions('dag', ['saveDAGchart', 'updateInstance', 'updateDefinition', 'getTaskState']),
...mapMutations('dag', ['addTasks', 'cacheTasks', 'resetParams', 'setIsEditDag', 'setName']),
// DAG automatic layout
dagAutomaticLayout() {
$('#canvas').html('')
// Destroy round robin
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Straight',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
]
],
Container: 'canvas'
})
})
if (this.tasks.length) {
Dag.backfill(true)
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
}
} else {
Dag.create()
}
},
init (args) {
if (this.tasks.length) {
Dag.backfill(args)
// Process instances can view status
if (this.type === 'instance') {
this._getTaskState(false).then(res => {})
// Round robin acquisition status
this.setIntervalP = setInterval(() => {
this._getTaskState(true).then(res => {})
}, 90000)
}
} else {
Dag.create()
}
},
/**
* copy name
*/
_copyName(){
let clipboard = new Clipboard(`.copy-name`)
clipboard.on('success', e => {
this.$message.success(`${i18n.$t('Copy success')}`)
// Free memory
clipboard.destroy()
})
clipboard.on('error', e => {
// Copy is not supported
this.$message.warning(`${i18n.$t('The browser does not support automatic copying')}`)
// Free memory
clipboard.destroy()
})
},
/**
* Get state interface
* @param isReset Whether to manually refresh
*/
_getTaskState (isReset) {
return new Promise((resolve, reject) => {
this.getTaskState(this.urlParam.id).then(res => {
let data = res.list
let state = res.processInstanceState
let taskList = res.taskList
let idArr = allNodesId()
const titleTpl = (item, desc) => {
let $item = _.filter(taskList, v => v.name === item.name)[0]
return `<div style="text-align: left">${i18n.$t('Name')}:${$item.name}</br>${i18n.$t('State')}:${desc}</br>${i18n.$t('type')}:${$item.taskType}</br>${i18n.$t('host')}:${$item.host || '-'}</br>${i18n.$t('Retry Count')}:${$item.retryTimes}</br>${i18n.$t('Submit Time')}:${formatDate($item.submitTime)}</br>${i18n.$t('Start Time')}:${formatDate($item.startTime)}</br>${i18n.$t('End Time')}:${$item.endTime ? formatDate($item.endTime) : '-'}</br></div>`
}
// remove tip state dom
$('.w').find('.state-p').html('')
data.forEach(v1 => {
idArr.forEach(v2 => {
if (v2.name === v1.name) {
let dom = $(`#${v2.id}`)
let state = dom.find('.state-p')
dom.attr('data-state-id', v1.stateId)
dom.attr('data-dependent-result', v1.dependentResult || '')
state.append(`<strong class="${v1.icoUnicode} ${v1.isSpin ? 'as as-spin' : ''}" style="color:${v1.color}" data-toggle="tooltip" data-html="true" data-container="body"></strong>`)
state.find('strong').attr('title', titleTpl(v2, v1.desc))
}
})
})
if (state === 'PAUSE' || state === 'STOP' || state === 'FAILURE' || state === 'SUCCESS') {
// Manual refresh does not regain large json
if (isReset) {
findComponentDownward(this.$root, `${this.type}-details`)._reset()
}
}
resolve()
})
})
},
/**
* Get the action bar id
* @param item
*/
_getDagId (v) {
// if (this.isDetails) {
// return
// }
this.dagBarId = v
},
/**
* operating
*/
_ckOperation (item) {
let is = true
let code = ''
if (!item.disable) {
return
}
if (this.toolOperCode === item.code) {
this.toolOperCode = ''
code = item.code
is = false
} else {
this.toolOperCode = item.code
code = this.toolOperCode
is = true
}
// event type
Dag.toolbarEvent({
item: item,
code: code,
is: is
})
},
_operationClass (item) {
return this.toolOperCode === item.code ? 'active' : ''
// if (item.disable) {
// return this.toolOperCode === item.code ? 'active' : ''
// } else {
// return 'disable'
// }
},
/**
* Storage interface
*/
_save (sourceType) {
return new Promise((resolve, reject) => {
this.spinnerLoading = true
// Storage store
Dag.saveStore().then(res => {
if (this.urlParam.id) {
/**
* Edit
* @param saveInstanceEditDAGChart => Process instance editing
* @param saveEditDAGChart => Process definition editing
*/
this[this.type === 'instance' ? 'updateInstance' : 'updateDefinition'](this.urlParam.id).then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.spinnerLoading = false
reject(e)
})
} else {
// New
this.saveDAGchart().then(res => {
this.$message.success(res.msg)
this.spinnerLoading = false
// source @/conf/home/pages/dag/_source/editAffirmModel/index.js
if (sourceType !== 'affirm') {
// Jump process definition
this.$router.push({ name: 'projects-definition-list' })
}
resolve()
}).catch(e => {
this.$message.error(e.msg || '')
this.setName('')
this.spinnerLoading = false
reject(e)
})
}
})
})
},
/**
* Global parameter
* @param Promise
*/
_udpTopFloorPop () {
return new Promise((resolve, reject) => {
let modal = this.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mUdp, {
on: {
onUdp () {
modal.remove()
resolve()
},
close () {
modal.remove()
}
}
})
}
})
})
},
/**
* Save chart
*/
_saveChart () {
// Verify node
if (!this.tasks.length) {
this.$message.warning(`${i18n.$t('Failed to create node to save')}`)
return
}
// Global parameters (optional)
this._udpTopFloorPop().then(() => {
return this._save()
})
},
/**
* Return to the previous child node
*/
_rtNodesDag () {
let getIds = this.$route.query.subProcessIds
let idsArr = getIds.split(',')
let ids = idsArr.slice(0, idsArr.length - 1)
let id = idsArr[idsArr.length - 1]
let query = {}
if (id !== idsArr[0]) {
query = { subProcessIds: ids.join(',') }
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${id}`, query: query })
},
/**
* Subprocess processing
* @param subProcessId Subprocess ID
*/
_subProcessHandle (subProcessId) {
let subProcessIds = []
let getIds = this.$route.query.subProcessIds
if (getIds) {
let newId = getIds.split(',')
newId.push(this.urlParam.id)
subProcessIds = newId
} else {
subProcessIds.push(this.urlParam.id)
}
let $name = this.$route.name.split('-')
this.$router.push({ path: `/${$name[0]}/${$name[1]}/list/${subProcessId}`, query: { subProcessIds: subProcessIds.join(',') } })
},
/**
* Refresh data
*/
_refresh () {
this.isRefresh = true
this._getTaskState(false).then(res => {
setTimeout(() => {
this.isRefresh = false
this.$message.success(`${i18n.$t('Refresh status succeeded')}`)
}, 2200)
})
},
/**
* View variables
*/
_toggleView () {
findComponentDownward(this.$root, `assist-dag-index`)._toggleView()
},
/**
* Starting parameters
*/
_toggleParam () {
findComponentDownward(this.$root, `starting-params-dag-index`)._toggleParam()
},
/**
* Create a node popup layer
* @param Object id
*/
_createNodes ({ id, type }) {
let self = this
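// close any task details drawer that is still open before opening a new one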
if (eventModel) {
eventModel.remove()
}
const removeNodesEvent = (fromThis) => {
// Manually destroy events inside the component
fromThis.$destroy()
// Close the popup
eventModel.remove()
}
this.taskId = id
eventModel = this.$drawer({
closable: false,
direction: 'right',
escClose: true,
render: h => h(mFormModel, {
on: {
addTaskInfo ({ item, fromThis }) {
self.addTasks(item)
setTimeout(() => {
removeNodesEvent(fromThis)
}, 100)
},
/**
* Cache the item
* @param item
* @param fromThis
*/
cacheTaskInfo({item, fromThis}) {
self.cacheTasks(item)
},
close ({ flag, fromThis }) {
// Edit status does not allow deletion of nodes
if (flag) {
jsPlumb.remove(id)
}
removeNodesEvent(fromThis)
},
onSubProcess ({ subProcessId, fromThis }) {
removeNodesEvent(fromThis)
self._subProcessHandle(subProcessId)
}
},
props: {
id: id,
taskType: type || self.dagBarId,
self: self
}
})
})
}
},
watch: {
'tasks': {
deep: true,
handler (o) {
// Edit state does not allow deletion of node a...
this.setIsEditDag(true)
}
}
},
created () {
// Edit state does not allow deletion of node a...
this.setIsEditDag(false)
if (this.$route.query.subProcessIds) {
this.isRtTasks = true
}
Dag.init({
dag: this,
instance: jsPlumb.getInstance({
Endpoint: [
'Dot', { radius: 1, cssClass: 'dot-style' }
],
Connector: 'Straight',
PaintStyle: { lineWidth: 2, stroke: '#456' }, // Connection style
ConnectionOverlays: [
[
'Arrow',
{
location: 1,
id: 'arrow',
length: 12,
foldback: 0.8
}
]
],
Container: 'canvas'
})
})
},
mounted () {
this.init(this.arg)
},
beforeDestroy () {
this.resetParams()
// Destroy round robin
clearInterval(this.setIntervalP)
},
destroyed () {
},
computed: {
...mapState('dag', ['tasks', 'locations', 'connects', 'isEditDag', 'name'])
},
components: {}
}
</script>
<style lang="scss" rel="stylesheet/scss">
@import "./dag";
</style>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,968 | [BUG] The task details window did not close properly. |
**Describe the bug**
The task details window did not close properly.
**To Reproduce**
1. Go to the process definition create or edit page.
2. Drag to create a new task node, or edit an existing node. The task details window will open.
3. Delete the task node or click the DataSource menu.
4. See the error: the task details window is still open.
**Which version of Dolphin Scheduler:**
-[dev]
**Requirement or improvement**
The task details window should be closed. | https://github.com/apache/dolphinscheduler/issues/1968 | https://github.com/apache/dolphinscheduler/pull/1969 | d7d7e95498cd69132660c7287052bb328a76f4ec | afd5c75cd27c9fe1a7efcafbed92bf28fc3996e5 | "2020-02-17T03:36:53Z" | java | "2020-02-17T11:09:10Z" | dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import 'jquery-ui/ui/widgets/draggable'
import 'jquery-ui/ui/widgets/droppable'
import 'jquery-ui/ui/widgets/resizable'
import Vue from 'vue'
import _ from 'lodash'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import DragZoom from './dragZoom'
import store from '@/conf/home/store'
import router from '@/conf/home/router'
import Permissions from '@/module/permissions'
import { uuid, findComponentDownward } from '@/module/util/'
import {
tasksAll,
rtTasksTpl,
setSvgColor,
saveTargetarr,
rtTargetarrArr } from './util'
import mStart from '@/conf/home/pages/projects/pages/definition/pages/list/_source/start'
let JSP = function () {
this.dag = {}
this.selectedElement = {}
this.config = {
// Whether to drag
isDrag: true,
// Whether to allow connection
isAttachment: false,
// Whether to drag a new node
isNewNodes: true,
// Whether to support double-click node events
isDblclick: true,
// Whether to support right-click menu events
isContextmenu: true,
// Whether to allow click events
isClick: false
}
}
/**
* dag init
*/
JSP.prototype.init = function ({ dag, instance }) {
// Get the dag component instance
this.dag = dag
// Get jsplumb instance
this.JspInstance = instance
// Register jsplumb connection type and configuration
this.JspInstance.registerConnectionType('basic', {
anchor: 'Continuous',
connector: 'Straight' // Line type
})
// Initial configuration
this.setConfig({
isDrag: !store.state.dag.isDetails,
isAttachment: false,
isNewNodes: !store.state.dag.isDetails,//Permissions.getAuth() === false ? false : !store.state.dag.isDetails,
isDblclick: true,
isContextmenu: true,
isClick: false
})
// Monitor line click
this.JspInstance.bind('click', e => {
if (this.config.isClick) {
this.connectClick(e)
}
})
// Drag and drop
if (this.config.isNewNodes) {
DragZoom.init()
}
}
/**
* set config attribute
*/
JSP.prototype.setConfig = function (o) {
this.config = Object.assign(this.config, {}, o)
}
/**
* Node binding event
*/
JSP.prototype.tasksEvent = function (selfId) {
let tasks = $(`#${selfId}`)
// Bind right event
tasks.on('contextmenu', e => {
this.tasksContextmenu(e)
return false
})
// Binding double click event
tasks.find('.icos').bind('dblclick', e => {
this.tasksDblclick(e)
})
// Binding click event
tasks.on('click', e => {
this.tasksClick(e)
})
}
/**
* Dag node drag and drop processing
*/
JSP.prototype.draggable = function () {
if (this.config.isNewNodes) {
let selfId
let self = this
$('.toolbar-btn .roundedRect').draggable({
scope: 'plant',
helper: 'clone',
containment: $('.dag-model'),
stop: function (e, ui) {
self.tasksEvent(selfId)
// Dom structure is not generated without pop-up form form
if ($(`#${selfId}`).html()) {
// dag event
findComponentDownward(self.dag.$root, 'dag-chart')._createNodes({
id: selfId
})
}
},
drag: function () {
$('body').find('.tooltip.fade.top.in').remove()
}
})
$('#canvas').droppable({
scope: 'plant',
drop: function (ev, ui) {
let id = 'tasks-' + Math.ceil(Math.random() * 100000) // eslint-disable-line
// Get mouse coordinates
let left = parseInt(ui.offset.left - $(this).offset().left)
let top = parseInt(ui.offset.top - $(this).offset().top) - 10
if (top < 25) {
top = 25
}
// Generate template node
$('#canvas').append(rtTasksTpl({
id: id,
name: id,
x: left,
y: top,
isAttachment: self.config.isAttachment,
taskType: findComponentDownward(self.dag.$root, 'dag-chart').dagBarId
}))
// Get the generated node
let thisDom = jsPlumb.getSelector('.statemachine-demo .w')
// Generating a connection node
self.JspInstance.batch(() => {
self.initNode(thisDom[thisDom.length - 1])
})
selfId = id
}
})
}
}
/**
* Echo json processing and old data structure processing
*/
JSP.prototype.jsonHandle = function ({ largeJson, locations }) {
_.map(largeJson, v => {
// Generate template
$('#canvas').append(rtTasksTpl({
id: v.id,
name: v.name,
x: locations[v.id]['x'],
y: locations[v.id]['y'],
targetarr: locations[v.id]['targetarr'],
isAttachment: this.config.isAttachment,
taskType: v.type,
runFlag: v.runFlag
}))
// contextmenu event
$(`#${v.id}`).on('contextmenu', e => {
this.tasksContextmenu(e)
return false
})
// dblclick event
$(`#${v.id}`).find('.icos').bind('dblclick', e => {
this.tasksDblclick(e)
})
// click event
$(`#${v.id}`).bind('click', e => {
this.tasksClick(e)
})
})
}
/**
* Initialize a single node
*/
JSP.prototype.initNode = function (el) {
// Whether to drag
if (this.config.isDrag) {
this.JspInstance.draggable(el, {
containment: 'dag-container'
})
}
// Node attribute configuration
this.JspInstance.makeSource(el, {
filter: '.ep',
anchor: 'Continuous',
connectorStyle: {
stroke: '#555',
strokeWidth: 2,
outlineStroke: 'transparent',
outlineWidth: 4
},
// This place is leaking
// connectionType: "basic",
extract: {
action: 'the-action'
},
maxConnections: -1
})
// Node connection property configuration
this.JspInstance.makeTarget(el, {
dropOptions: { hoverClass: 'dragHover' },
anchor: 'Continuous',
allowLoopback: false // Forbid yourself to connect yourself
})
this.JspInstance.fire('jsPlumbDemoNodeAdded', el)
}
/**
* Node right click menu
*/
JSP.prototype.tasksContextmenu = function (event) {
if (this.config.isContextmenu) {
let routerName = router.history.current.name
// state
let isOne = routerName === 'projects-definition-details' && this.dag.releaseState !== 'NOT_RELEASE'
// hide
let isTwo = store.state.dag.isDetails
let html = [
`<a href="javascript:" id="startRunning" class="${isOne ? '' : 'disbled'}"><em class="ans-icon-play"></em><span>${i18n.$t('Start')}</span></a>`,
`<a href="javascript:" id="editNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-edit"></em><span>${i18n.$t('Edit')}</span></a>`,
`<a href="javascript:" id="copyNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-copy"></em><span>${i18n.$t('Copy')}</span></a>`,
`<a href="javascript:" id="removeNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-trash"></em><span>${i18n.$t('Delete')}</span></a>`
]
let operationHtml = () => {
return html.splice(',')
}
let e = event
let $id = e.currentTarget.id
let $contextmenu = $('#contextmenu')
let $name = $(`#${$id}`).find('.name-p').text()
let $left = e.pageX + document.body.scrollLeft - 5
let $top = e.pageY + document.body.scrollTop - 5
$contextmenu.css({
left: $left,
top: $top,
visibility: 'visible'
})
// Action bar
$contextmenu.html('').append(operationHtml)
if (isOne) {
// start run
$('#startRunning').on('click', () => {
let id = router.history.current.params.id
store.dispatch('dag/getStartCheck', { processDefinitionId: id }).then(res => {
let modal = Vue.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mStart, {
on: {
onUpdate () {
modal.remove()
},
close () {
modal.remove()
}
},
props: {
item: {
id: id
},
startNodeList: $name,
sourceType: 'contextmenu'
}
})
}
})
}).catch(e => {
Vue.$message.error(e.msg || '')
})
})
}
if (!isTwo) {
// edit node
$(`#editNodes`).click(ev => {
findComponentDownward(this.dag.$root, 'dag-chart')._createNodes({
id: $id,
type: $(`#${$id}`).attr('data-tasks-type')
})
})
// delete node
$('#removeNodes').click(ev => {
this.removeNodes($id)
})
// copy node
$('#copyNodes').click(res => {
this.copyNodes($id)
})
}
}
}
/**
* Node double click event
*/
JSP.prototype.tasksDblclick = function (e) {
// Untie event
if (this.config.isDblclick) {
let id = $(e.currentTarget.offsetParent).attr('id')
findComponentDownward(this.dag.$root, 'dag-chart')._createNodes({
id: id,
type: $(`#${id}`).attr('data-tasks-type')
})
}
}
/**
* Node click event
*/
JSP.prototype.tasksClick = function (e) {
let $id
let self = this
let $body = $(`body`)
if (this.config.isClick) {
let $connect = this.selectedElement.connect
$('.w').removeClass('jtk-tasks-active')
$(e.currentTarget).addClass('jtk-tasks-active')
if ($connect) {
setSvgColor($connect, '#555')
this.selectedElement.connect = null
}
this.selectedElement.id = $(e.currentTarget).attr('id')
// Unbind copy and paste events
$body.unbind('copy').unbind('paste')
// Copy binding id
$id = self.selectedElement.id
$body.bind({
copy: function () {
$id = self.selectedElement.id
},
paste: function () {
$id && self.copyNodes($id)
}
})
}
}
/**
* Remove binding events
* paste
*/
JSP.prototype.removePaste = function () {
let $body = $(`body`)
// Unbind copy and paste events
$body.unbind('copy').unbind('paste')
// Remove selected node parameters
this.selectedElement.id = null
// Remove node selection effect
$('.w').removeClass('jtk-tasks-active')
}
/**
* Line click event
*/
JSP.prototype.connectClick = function (e) {
// Set svg color
setSvgColor(e, '#0097e0')
let $id = this.selectedElement.id
if ($id) {
$(`#${$id}`).removeClass('jtk-tasks-active')
this.selectedElement.id = null
}
this.selectedElement.connect = e
}
/**
* toolbarEvent
* @param {Pointer}
*/
JSP.prototype.handleEventPointer = function (is) {
let wDom = $('.w')
this.setConfig({
isClick: is,
isAttachment: false
})
wDom.removeClass('jtk-ep')
if (!is) {
wDom.removeClass('jtk-tasks-active')
this.selectedElement = {}
_.map($('#canvas svg'), v => {
if ($(v).attr('class')) {
_.map($(v).find('path'), v1 => {
$(v1).attr('fill', '#555')
$(v1).attr('stroke', '#555')
})
}
})
}
}
/**
* toolbarEvent
* @param {Line}
*/
JSP.prototype.handleEventLine = function (is) {
let wDom = $('.w')
this.setConfig({
isAttachment: is
})
is ? wDom.addClass('jtk-ep') : wDom.removeClass('jtk-ep')
}
/**
* toolbarEvent
* @param {Remove}
*/
JSP.prototype.handleEventRemove = function () {
let $id = this.selectedElement.id || null
let $connect = this.selectedElement.connect || null
if ($id) {
this.removeNodes(this.selectedElement.id)
} else {
this.removeConnect($connect)
}
// Monitor whether to edit DAG
store.commit('dag/setIsEditDag', true)
}
/**
* Delete node
*/
JSP.prototype.removeNodes = function ($id) {
// Delete node processing(data-targetarr)
_.map(tasksAll(), v => {
let targetarr = v.targetarr.split(',')
if (targetarr.length) {
let newArr = _.filter(targetarr, v1 => v1 !== $id)
$(`#${v.id}`).attr('data-targetarr', newArr.toString())
}
})
// delete node
this.JspInstance.remove($id)
// delete dom
$(`#${$id}`).remove()
}
/**
* Delete connection
*/
JSP.prototype.removeConnect = function ($connect) {
if (!$connect) {
return
}
// Remove connections and remove node and node dependencies
let targetId = $connect.targetId
let sourceId = $connect.sourceId
let targetarr = rtTargetarrArr(targetId)
if (targetarr.length) {
targetarr = _.filter(targetarr, v => v !== sourceId)
$(`#${targetId}`).attr('data-targetarr', targetarr.toString())
}
this.JspInstance.deleteConnection($connect)
this.selectedElement = {}
}
/**
* Copy node
*/
JSP.prototype.copyNodes = function ($id) {
let newNodeInfo = _.cloneDeep(_.find(store.state.dag.tasks, v => v.id === $id))
let newNodePors = store.state.dag.locations[$id]
// Unstored nodes do not allow replication
if (!newNodePors) {
return
}
// Generate random id
let newUuId = `${uuid() + uuid()}`
let id = newNodeInfo.id.length > 8 ? newNodeInfo.id.substr(0, 7) : newNodeInfo.id
let name = newNodeInfo.name.length > 8 ? newNodeInfo.name.substr(0, 7) : newNodeInfo.name
// new id
let newId = `${id || ''}-${newUuId}`
// new name
let newName = `${name || ''}-${newUuId}`
// coordinate x
let newX = newNodePors.x + 100
// coordinate y
let newY = newNodePors.y + 40
// Generate template node
$('#canvas').append(rtTasksTpl({
id: newId,
name: newName,
x: newX,
y: newY,
isAttachment: this.config.isAttachment,
taskType: newNodeInfo.type
}))
// Get the generated node
let thisDom = jsPlumb.getSelector('.statemachine-demo .w')
// Copy node information
newNodeInfo = Object.assign(newNodeInfo, {
id: newId,
name: newName
})
// Add new node
store.commit('dag/addTasks', newNodeInfo)
// Add node location information
store.commit('dag/addLocations', {
[newId]: {
name: newName,
targetarr: '',
x: newX,
y: newY
}
})
// Generating a connection node
this.JspInstance.batch(() => {
this.initNode(thisDom[thisDom.length - 1])
// Add events to nodes
this.tasksEvent(newId)
})
}
/**
* toolbarEvent
* @param {Screen}
*/
JSP.prototype.handleEventScreen = function ({ item, is }) {
let screenOpen = true
if (is) {
item.icon = 'ans-icon-min'
screenOpen = true
} else {
item.icon = 'ans-icon-max'
screenOpen = false
}
let $mainLayoutModel = $('.main-layout-model')
if (screenOpen) {
$mainLayoutModel.addClass('dag-screen')
} else {
$mainLayoutModel.removeClass('dag-screen')
}
}
/**
* save task
* @param tasks
* @param locations
* @param connects
*/
JSP.prototype.saveStore = function () {
return new Promise((resolve, reject) => {
let connects = []
let locations = {}
let tasks = []
let is = (id) => {
return !!_.filter(tasksAll(), v => v.id === id).length
}
// task
_.map(_.cloneDeep(store.state.dag.tasks), v => {
if (is(v.id)) {
let preTasks = []
let id = $(`#${v.id}`)
let tar = id.attr('data-targetarr')
let idDep = tar ? id.attr('data-targetarr').split(',') : []
if (idDep.length) {
_.map(idDep, v1 => {
preTasks.push($(`#${v1}`).find('.name-p').text())
})
}
let tasksParam = _.assign(v, {
preTasks: preTasks
})
// Sub-workflow has no retries and interval
if (v.type === 'SUB_PROCESS') {
tasksParam = _.omit(tasksParam, ['maxRetryTimes', 'retryInterval'])
}
tasks.push(tasksParam)
}
})
_.map(this.JspInstance.getConnections(), v => {
connects.push({
'endPointSourceId': v.sourceId,
'endPointTargetId': v.targetId
})
})
_.map(tasksAll(), v => {
locations[v.id] = {
name: v.name,
targetarr: v.targetarr,
x: v.x,
y: v.y
}
})
// Storage node
store.commit('dag/setTasks', tasks)
// Store coordinate information
store.commit('dag/setLocations', locations)
// Storage line dependence
store.commit('dag/setConnects', connects)
resolve({
connects: connects,
tasks: tasks,
locations: locations
})
})
}
/**
* Event processing
*/
JSP.prototype.handleEvent = function () {
this.JspInstance.bind('beforeDrop', function (info) {
let sourceId = info['sourceId'] // source (outgoing) node
let targetId = info['targetId'] // target (incoming) node
/**
* Recursive search for nodes
*/
let recursiveVal
const recursiveTargetarr = (arr, targetId) => {
for (let i in arr) {
if (arr[i] === targetId) {
recursiveVal = targetId
} else {
recursiveTargetarr(rtTargetarrArr(arr[i]), targetId)
}
}
return recursiveVal
}
// Connection to connected nodes is not allowed
if (_.findIndex(rtTargetarrArr(targetId), v => v === sourceId) !== -1) {
return false
}
// Recursive form to find if the target Targetarr has a sourceId
if (recursiveTargetarr(rtTargetarrArr(sourceId), targetId)) {
return false
}
// Storage node dependency information
saveTargetarr(sourceId, targetId)
// Monitor whether to edit DAG
store.commit('dag/setIsEditDag', true)
return true
})
}
/**
* Backfill data processing
*/
JSP.prototype.jspBackfill = function ({ connects, locations, largeJson }) {
// Backfill nodes
this.jsonHandle({
largeJson: largeJson,
locations: locations
})
let wNodes = jsPlumb.getSelector('.statemachine-demo .w')
// Backfill line
this.JspInstance.batch(() => {
for (let i = 0; i < wNodes.length; i++) {
this.initNode(wNodes[i])
}
_.map(connects, v => {
let sourceId = v.endPointSourceId.split('-')
let targetId = v.endPointTargetId.split('-')
if (sourceId.length === 4 && targetId.length === 4) {
sourceId = `${sourceId[0]}-${sourceId[1]}-${sourceId[2]}`
targetId = `${targetId[0]}-${targetId[1]}-${targetId[2]}`
} else {
sourceId = v.endPointSourceId
targetId = v.endPointTargetId
}
this.JspInstance.connect({
source: sourceId,
target: targetId,
type: 'basic',
paintStyle: { strokeWidth: 2, stroke: '#555' }
})
})
})
jsPlumb.fire('jsPlumbDemoLoaded', this.JspInstance)
// Connection monitoring
this.handleEvent()
// Drag and drop new nodes
this.draggable()
}
export default new JSP()
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,996 | [BUG] install.sh will happen "sh: bin/dolphinscheduler-daemon.sh: No such file or directory" when first deploy | **Describe the bug**
When I deploy DolphinScheduler for the first time, I find the following in install.sh:
```
# 3,stop server
echo "3,stop server"
sh ${workDir}/script/stop-all.sh
...
# 5,scp resources
echo "5,scp resources"
sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ]
then
echo 'scp copy completed'
else
echo 'scp copy failed to exit'
exit -1
fi
```
**Which version of Dolphin Scheduler:**
-[1.2.0]
**How to solve**
Step 3 (stop server) should be moved after step 5 (scp resources), because at step 3 the shell scripts do not yet exist in the install path on a first deploy. | https://github.com/apache/dolphinscheduler/issues/1996 | https://github.com/apache/dolphinscheduler/pull/1998 | b4f2e5f4210ce70d6ec50e68fd177ee26841fc03 | 5ecd3b30b923dc4dfa6ac846b6ac44bfb2f8e11f | "2020-02-22T12:20:33Z" | java | "2020-02-23T08:59:24Z" | install.sh | #!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
workDir=`dirname $0`
workDir=`cd ${workDir};pwd`
#To be compatible with MacOS and Linux
txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
txt="''"
elif [[ "$OSTYPE" == "linux-gnu" ]]; then
# linux
txt=""
elif [[ "$OSTYPE" == "cygwin" ]]; then
# POSIX compatibility layer and Linux environment emulation for Windows
echo "DolphinScheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "msys" ]]; then
# Lightweight shell and GNU utilities compiled for Windows (part of MinGW)
echo "DolphinScheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "win32" ]]; then
echo "DolphinScheduler not support Windows operating system"
exit 1
elif [[ "$OSTYPE" == "freebsd"* ]]; then
# ...
txt=""
else
# Unknown.
echo "Operating system unknown, please tell us(submit issue) for better service"
exit 1
fi
source ${workDir}/conf/config/install_config.conf
# for example postgresql or mysql ...
dbtype="postgresql"
# db config
# db address and port
dbhost="192.168.xx.xx:5432"
# db name
dbname="dolphinscheduler"
# db username
username="xx"
# db password
# Note: if there are special characters, please use the \ transfer character to transfer
passowrd="xx"
# conf/config/install_config.conf config
# Note: the installation path is not the same as the current path (pwd)
installPath="/data1_1T/dolphinscheduler"
# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"
# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# install hosts
# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
ips="ark0,ark1,ark2,ark3,ark4"
# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort=22
# run master machine
# Note: list of hosts hostname for deploying master
masters="ark0,ark1"
# run worker machine
# note: list of machine hostnames for deploying workers
workers="ark2,ark3,ark4"
# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ark3"
# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ark1"
# alert config
# mail protocol
mailProtocol="SMTP"
# mail server host
mailServerHost="smtp.exmail.qq.com"
# mail server port
mailServerPort="25"
# sender
mailSender="xxxxxxxxxx"
# user
mailUser="xxxxxxxxxx"
# sender password
mailPassword="xxxxxxxxxx"
# TLS mail protocol support
starttlsEnable="false"
sslTrust="xxxxxxxxxx"
# SSL mail protocol support
# note: The SSL protocol is enabled by default.
# only one of TLS and SSL can be in the true state.
sslEnable="true"
# download excel path
xlsFilePath="/tmp/xls"
# Enterprise WeChat Enterprise ID Configuration
enterpriseWechatCorpId="xxxxxxxxxx"
# Enterprise WeChat application Secret configuration
enterpriseWechatSecret="xxxxxxxxxx"
# Enterprise WeChat Application AgentId Configuration
enterpriseWechatAgentId="xxxxxxxxxx"
# Enterprise WeChat user configuration, multiple users to , split
enterpriseWechatUsers="xxxxx,xxxxx"
# whether to start monitoring self-starting scripts
monitorServerState="false"
# resource Center upload and select storage method:HDFS,S3,NONE
resUploadStartupType="NONE"
# if resUploadStartupType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
# Note,s3 be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"
# if S3 is configured, the following configuration is required.
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"
# resourcemanager HA configuration, if it is a single resourcemanager, here is yarnHaIps=""
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# if it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine.
singleYarnIp="ark1"
# hdfs root path, the owner of the root path must be the deployment user.
# versions prior to 1.1.0 do not automatically create the hdfs root directory, you need to create it yourself.
hdfsPath="/dolphinscheduler"
# have users who create directory permissions under hdfs root path /
# Note: if kerberos is enabled, hdfsRootUser="" can be used directly.
hdfsRootUser="hdfs"
# common config
# Program root path
programPath="/tmp/dolphinscheduler"
# download path
downloadPath="/tmp/dolphinscheduler/download"
# task execute path
execPath="/tmp/dolphinscheduler/exec"
# SHELL environmental variable path
shellEnvPath="$installPath/conf/env/dolphinscheduler_env.sh"
# suffix of the resource file
resSuffixs="txt,log,sh,conf,cfg,py,java,sql,hql,xml"
# development status, if true, for the SHELL script, you can view the encapsulated SHELL script in the execPath directory.
# If it is false, execute the direct delete
devState="true"
# kerberos config
# kerberos whether to start
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="[email protected]"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# zk config
# zk root directory
zkRoot="/dolphinscheduler"
# zk session timeout
zkSessionTimeout="300"
# zk connection timeout
zkConnectionTimeout="300"
# zk retry interval
zkRetryMaxSleep="100"
# zk retry maximum number of times
zkRetryMaxtime="5"
# master config
# master execution thread maximum number, maximum parallelism of process instance
masterExecThreads="100"
# the maximum number of master task execution threads, the maximum degree of parallelism for each process instance
masterExecTaskNum="20"
# master heartbeat interval
masterHeartbeatInterval="10"
# master task submission retries
masterTaskCommitRetryTimes="5"
# master task submission retry interval
masterTaskCommitInterval="1000"
# master maximum cpu average load, used to determine whether the master has execution capability
masterMaxCpuLoadAvg="100"
# master reserve memory to determine if the master has execution capability
masterReservedMemory="0.1"
# worker config
# worker execution thread
workerExecThreads="100"
# worker heartbeat interval
workerHeartbeatInterval="10"
# worker number of fetch tasks
workerFetchTaskNum="3"
# worker reserve memory to determine if the master has execution capability
workerReservedMemory="0.1"
# api config
# api server port
apiServerPort="12345"
# api session timeout
apiServerSessionTimeout="7200"
# api server context path
apiServerContextPath="/dolphinscheduler/"
# spring max file size
springMaxFileSize="1024MB"
# spring max request size
springMaxRequestSize="1024MB"
# api max http post size
apiMaxHttpPostSize="5000000"
# 1,replace file
echo "1,replace file"
if [ $dbtype == "mysql" ];then
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=com.mysql.jdbc.Driver#g" conf/application.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:mysql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=com.mysql.jdbc.Driver#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate#g" conf/quartz.properties
fi
if [ $dbtype == "postgresql" ];then
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${passowrd}#g" conf/application.properties
sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=org.postgresql.Driver#g" conf/application.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.URL.*#org.quartz.dataSource.myDs.URL=jdbc:postgresql://${dbhost}/${dbname}?characterEncoding=UTF-8#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.user.*#org.quartz.dataSource.myDs.user=${username}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.password.*#org.quartz.dataSource.myDs.password=${passowrd}#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.dataSource.myDs.driver.*#org.quartz.dataSource.myDs.driver=org.postgresql.Driver#g" conf/quartz.properties
sed -i ${txt} "s#org.quartz.jobStore.driverDelegateClass.*#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.PostgreSQLDelegate#g" conf/quartz.properties
fi
sed -i ${txt} "s#master.exec.threads.*#master.exec.threads=${masterExecThreads}#g" conf/application.properties
sed -i ${txt} "s#master.exec.task.num.*#master.exec.task.num=${masterExecTaskNum}#g" conf/application.properties
sed -i ${txt} "s#master.heartbeat.interval.*#master.heartbeat.interval=${masterHeartbeatInterval}#g" conf/application.properties
sed -i ${txt} "s#master.task.commit.retryTimes.*#master.task.commit.retryTimes=${masterTaskCommitRetryTimes}#g" conf/application.properties
sed -i ${txt} "s#master.task.commit.interval.*#master.task.commit.interval=${masterTaskCommitInterval}#g" conf/application.properties
sed -i ${txt} "s#master.reserved.memory.*#master.reserved.memory=${masterReservedMemory}#g" conf/application.properties
sed -i ${txt} "s#worker.exec.threads.*#worker.exec.threads=${workerExecThreads}#g" conf/application.properties
sed -i ${txt} "s#worker.heartbeat.interval.*#worker.heartbeat.interval=${workerHeartbeatInterval}#g" conf/application.properties
sed -i ${txt} "s#worker.fetch.task.num.*#worker.fetch.task.num=${workerFetchTaskNum}#g" conf/application.properties
sed -i ${txt} "s#worker.reserved.memory.*#worker.reserved.memory=${workerReservedMemory}#g" conf/application.properties
sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common.properties
sed -i ${txt} "s#data.basedir.path.*#data.basedir.path=${programPath}#g" conf/common.properties
sed -i ${txt} "s#data.download.basedir.path.*#data.download.basedir.path=${downloadPath}#g" conf/common.properties
sed -i ${txt} "s#process.exec.basepath.*#process.exec.basepath=${execPath}#g" conf/common.properties
sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
sed -i ${txt} "s#data.store2hdfs.basepath.*#data.store2hdfs.basepath=${hdfsPath}#g" conf/common.properties
sed -i ${txt} "s#res.upload.startup.type.*#res.upload.startup.type=${resUploadStartupType}#g" conf/common.properties
sed -i ${txt} "s#dolphinscheduler.env.path.*#dolphinscheduler.env.path=${shellEnvPath}#g" conf/common.properties
sed -i ${txt} "s#resource.view.suffixs.*#resource.view.suffixs=${resSuffixs}#g" conf/common.properties
sed -i ${txt} "s#development.state.*#development.state=${devState}#g" conf/common.properties
sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.dolphinscheduler.root.*#zookeeper.dolphinscheduler.root=${zkRoot}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.session.timeout.*#zookeeper.session.timeout=${zkSessionTimeout}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.connection.timeout.*#zookeeper.connection.timeout=${zkConnectionTimeout}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.retry.max.sleep.*#zookeeper.retry.max.sleep=${zkRetryMaxSleep}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.retry.maxtime.*#zookeeper.retry.maxtime=${zkRetryMaxtime}#g" conf/common.properties
sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties
sed -i ${txt} "s#server.servlet.session.timeout.*#server.servlet.session.timeout=${apiServerSessionTimeout}#g" conf/application-api.properties
sed -i ${txt} "s#server.servlet.context-path.*#server.servlet.context-path=${apiServerContextPath}#g" conf/application-api.properties
sed -i ${txt} "s#spring.servlet.multipart.max-file-size.*#spring.servlet.multipart.max-file-size=${springMaxFileSize}#g" conf/application-api.properties
sed -i ${txt} "s#spring.servlet.multipart.max-request-size.*#spring.servlet.multipart.max-request-size=${springMaxRequestSize}#g" conf/application-api.properties
sed -i ${txt} "s#server.jetty.max-http-post-size.*#server.jetty.max-http-post-size=${apiMaxHttpPostSize}#g" conf/application-api.properties
sed -i ${txt} "s#mail.protocol.*#mail.protocol=${mailProtocol}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
sed -i ${txt} "s#mail.user.*#mail.user=${mailUser}#g" conf/alert.properties
sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.starttls.enable.*#mail.smtp.starttls.enable=${starttlsEnable}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.ssl.trust.*#mail.smtp.ssl.trust=${sslTrust}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.ssl.enable.*#mail.smtp.ssl.enable=${sslEnable}#g" conf/alert.properties
sed -i ${txt} "s#xls.file.path.*#xls.file.path=${xlsFilePath}#g" conf/alert.properties
sed -i ${txt} "s#enterprise.wechat.corp.id.*#enterprise.wechat.corp.id=${enterpriseWechatCorpId}#g" conf/alert.properties
sed -i ${txt} "s#enterprise.wechat.secret.*#enterprise.wechat.secret=${enterpriseWechatSecret}#g" conf/alert.properties
sed -i ${txt} "s#enterprise.wechat.agent.id.*#enterprise.wechat.agent.id=${enterpriseWechatAgentId}#g" conf/alert.properties
sed -i ${txt} "s#enterprise.wechat.users.*#enterprise.wechat.users=${enterpriseWechatUsers}#g" conf/alert.properties
sed -i ${txt} "s#installPath.*#installPath=${installPath}#g" conf/config/install_config.conf
sed -i ${txt} "s#deployUser.*#deployUser=${deployUser}#g" conf/config/install_config.conf
sed -i ${txt} "s#ips.*#ips=${ips}#g" conf/config/install_config.conf
sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf
sed -i ${txt} "s#masters.*#masters=${masters}#g" conf/config/install_config.conf
sed -i ${txt} "s#workers.*#workers=${workers}#g" conf/config/install_config.conf
sed -i ${txt} "s#alertServer.*#alertServer=${alertServer}#g" conf/config/install_config.conf
sed -i ${txt} "s#apiServers.*#apiServers=${apiServers}#g" conf/config/install_config.conf
sed -i ${txt} "s#sshPort.*#sshPort=${sshPort}#g" conf/config/install_config.conf
# 2,create directory
echo "2,create directory"
if [ ! -d $installPath ];then
sudo mkdir -p $installPath
sudo chown -R $deployUser:$deployUser $installPath
fi
hostsArr=(${ips//,/ })
for host in ${hostsArr[@]}
do
# create if programPath does not exist
if ! ssh -p $sshPort $host test -e $programPath; then
ssh -p $sshPort $host "sudo mkdir -p $programPath;sudo chown -R $deployUser:$deployUser $programPath"
fi
# create if downloadPath does not exist
if ! ssh -p $sshPort $host test -e $downloadPath; then
ssh -p $sshPort $host "sudo mkdir -p $downloadPath;sudo chown -R $deployUser:$deployUser $downloadPath"
fi
# create if execPath does not exist
if ! ssh -p $sshPort $host test -e $execPath; then
ssh -p $sshPort $host "sudo mkdir -p $execPath; sudo chown -R $deployUser:$deployUser $execPath"
fi
# create if xlsFilePath does not exist
if ! ssh -p $sshPort $host test -e $xlsFilePath; then
ssh -p $sshPort $host "sudo mkdir -p $xlsFilePath; sudo chown -R $deployUser:$deployUser $xlsFilePath"
fi
done
# 3,stop server
echo "3,stop server"
sh ${workDir}/script/stop-all.sh
# 4,delete zk node
echo "4,delete zk node"
sh ${workDir}/script/remove-zk-node.sh $zkRoot
# 5,scp resources
echo "5,scp resources"
sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ]
then
echo 'scp copy completed'
else
echo 'scp copy failed to exit'
exit -1
fi
# 6,startup
echo "6,startup"
sh ${workDir}/script/start-all.sh |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,111 | [BUG] sun.misc.JavaIOFileDescriptorAccess is not portable | Current implementation relies on `sun.misc.JavaIOFileDescriptorAccess` which is only accessible on oraclejdk8.
Basically the demand is getting & setting the `field` field of `FileDescriptor`, so we can do that directly with reflection.
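For illustration only, here is a minimal sketch of that reflective approach. The `handle` field name below is an assumption (the Windows-side native handle of `FileDescriptor`; verify it against the target JDK), and on JDK 9+ the lookup may additionally require `--add-opens java.base/java.io=ALL-UNNAMED`:

```java
import java.io.FileDescriptor;
import java.lang.reflect.Field;

/**
 * Sketch only: read/write a FileDescriptor field via plain reflection instead of
 * sun.misc.JavaIOFileDescriptorAccess (which is Oracle JDK 8 specific).
 * The field name "handle" is an assumption and is only present on Windows-style
 * FileDescriptor implementations (and the unified implementation in newer JDKs).
 */
public final class FileDescriptorReflection {

    private static final Field HANDLE_FIELD;

    static {
        try {
            Field field = FileDescriptor.class.getDeclaredField("handle");
            field.setAccessible(true);
            HANDLE_FIELD = field;
        } catch (NoSuchFieldException e) {
            // the field is absent on this JDK; fail fast so callers notice early
            throw new ExceptionInInitializerError(e);
        }
    }

    private FileDescriptorReflection() {
    }

    /** Returns the native handle stored in the given descriptor. */
    public static long getHandle(FileDescriptor descriptor) {
        try {
            return HANDLE_FIELD.getLong(descriptor);
        } catch (IllegalAccessException e) {
            throw new IllegalStateException("cannot read FileDescriptor.handle", e);
        }
    }

    /** Stores the given native handle into the descriptor. */
    public static void setHandle(FileDescriptor descriptor, long handle) {
        try {
            HANDLE_FIELD.setLong(descriptor, handle);
        } catch (IllegalAccessException e) {
            throw new IllegalStateException("cannot write FileDescriptor.handle", e);
        }
    }
}
```

Resolving the `Field` once in a static initializer keeps the reflective lookup cost out of the per-process-creation path.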
That said, I question the necessity of introducing `ProcessImplForWin32` at all. Maybe we could find a better way to let the worker server run bat scripts. | https://github.com/apache/dolphinscheduler/issues/2111 | https://github.com/apache/dolphinscheduler/pull/2113 | 450a1f56fc73f088fce89a343a0b008706f2088c | 9224b49b58b756d22c75d8929108f716283282b4 | "2020-03-08T05:09:46Z" | java | "2020-03-09T11:06:41Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.shell.ShellExecutor;
import org.apache.commons.configuration.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import oshi.SystemInfo;
import oshi.hardware.CentralProcessor;
import oshi.hardware.GlobalMemory;
import oshi.hardware.HardwareAbstractionLayer;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.math.RoundingMode;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.text.DecimalFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
/**
* os utils
*
*/
public class OSUtils {
private static final Logger logger = LoggerFactory.getLogger(OSUtils.class);
private static final SystemInfo SI = new SystemInfo();
public static final String TWO_DECIMAL = "0.00";
private static HardwareAbstractionLayer hal = SI.getHardware();
private OSUtils() {}
/**
* get memory usage
* Keep 2 decimal
* @return percent %
*/
public static double memoryUsage() {
GlobalMemory memory = hal.getMemory();
double memoryUsage = (memory.getTotal() - memory.getAvailable() - memory.getSwapUsed()) * 0.1 / memory.getTotal() * 10;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(memoryUsage));
}
/**
* get available physical memory size
*
* Keep 2 decimal
* @return available Physical Memory Size, unit: G
*/
public static double availablePhysicalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = (memory.getAvailable() + memory.getSwapUsed()) /1024.0/1024/1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* get total physical memory size
*
* Keep 2 decimal
* @return available Physical Memory Size, unit: G
*/
public static double totalMemorySize() {
GlobalMemory memory = hal.getMemory();
double availablePhysicalMemorySize = memory.getTotal() /1024.0/1024/1024;
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(availablePhysicalMemorySize));
}
/**
* load average
*
* @return load average
*/
public static double loadAverage() {
double loadAverage = hal.getProcessor().getSystemLoadAverage();
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(loadAverage));
}
/**
* get cpu usage
*
* @return cpu usage
*/
public static double cpuUsage() {
CentralProcessor processor = hal.getProcessor();
double cpuUsage = processor.getSystemCpuLoad();
DecimalFormat df = new DecimalFormat(TWO_DECIMAL);
df.setRoundingMode(RoundingMode.HALF_UP);
return Double.parseDouble(df.format(cpuUsage));
}
public static List<String> getUserList() {
try {
if (isMacOS()) {
return getUserListFromMac();
} else if (isWindows()) {
return getUserListFromWindows();
} else {
return getUserListFromLinux();
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return Collections.emptyList();
}
/**
* get user list from linux
*
* @return user list
*/
private static List<String> getUserListFromLinux() throws IOException {
List<String> userList = new ArrayList<>();
try (BufferedReader bufferedReader = new BufferedReader(
new InputStreamReader(new FileInputStream("/etc/passwd")))) {
String line;
while ((line = bufferedReader.readLine()) != null) {
if (line.contains(":")) {
String[] userInfo = line.split(":");
userList.add(userInfo[0]);
}
}
}
return userList;
}
/**
* get user list from mac
* @return user list
*/
private static List<String> getUserListFromMac() throws IOException {
String result = exeCmd("dscl . list /users");
if (StringUtils.isNotEmpty(result)) {
return Arrays.asList(result.split( "\n"));
}
return Collections.emptyList();
}
/**
* get user list from windows
* @return user list
* @throws IOException
*/
private static List<String> getUserListFromWindows() throws IOException {
String result = exeCmd("net user");
String[] lines = result.split("\n");
int startPos = 0;
int endPos = lines.length - 2;
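// the output of `net user` prints a separator row made up of '-' characters just before the account names;
// the loop below scans for that row to locate where the user list starts (the trailing two status lines are skipped)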
for (int i = 0; i < lines.length; i++) {
if (lines[i].isEmpty()) {
continue;
}
int count = 0;
if (lines[i].charAt(0) == '-') {
for (int j = 0; j < lines[i].length(); j++) {
if (lines[i].charAt(j) == '-') {
count++;
}
}
}
if (count == lines[i].length()) {
startPos = i + 1;
break;
}
}
List<String> users = new ArrayList<>();
while (startPos <= endPos) {
Pattern pattern = Pattern.compile("\\s+");
users.addAll(Arrays.asList(pattern.split(lines[startPos])));
startPos++;
}
return users;
}
/**
* create user
* @param userName user name
* @return true if creation was successful, otherwise false
*/
public static boolean createUser(String userName) {
try {
String userGroup = OSUtils.getGroup();
if (StringUtils.isEmpty(userGroup)) {
logger.error("{} group does not exist for this operating system.", userGroup);
return false;
}
if (isMacOS()) {
createMacUser(userName, userGroup);
} else if (isWindows()) {
createWindowsUser(userName, userGroup);
} else {
createLinuxUser(userName, userGroup);
}
return true;
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
return false;
}
/**
* create linux user
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createLinuxUser(String userName, String userGroup) throws IOException {
logger.info("create linux os user : {}", userName);
String cmd = String.format("sudo useradd -g %s %s", userGroup, userName);
logger.info("execute cmd : {}", cmd);
OSUtils.exeCmd(cmd);
}
/**
* create mac user (Supports Mac OSX 10.10+)
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createMacUser(String userName, String userGroup) throws IOException {
logger.info("create mac os user : {}", userName);
String userCreateCmd = String.format("sudo sysadminctl -addUser %s -password %s", userName, userName);
String appendGroupCmd = String.format("sudo dseditgroup -o edit -a %s -t user %s", userName, userGroup);
logger.info("create user command : {}", userCreateCmd);
OSUtils.exeCmd(userCreateCmd);
logger.info("append user to group : {}", appendGroupCmd);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* create windows user
* @param userName user name
* @param userGroup user group
* @throws IOException in case of an I/O error
*/
private static void createWindowsUser(String userName, String userGroup) throws IOException {
logger.info("create windows os user : {}", userName);
String userCreateCmd = String.format("net user \"%s\" /add", userName);
String appendGroupCmd = String.format("net localgroup \"%s\" \"%s\" /add", userGroup, userName);
logger.info("execute create user command : {}", userCreateCmd);
OSUtils.exeCmd(userCreateCmd);
logger.info("execute append user to group : {}", appendGroupCmd);
OSUtils.exeCmd(appendGroupCmd);
}
/**
* get system group information
* @return system group info
* @throws IOException errors
*/
public static String getGroup() throws IOException {
if (isWindows()) {
String currentProcUserName = System.getProperty("user.name");
String result = exeCmd(String.format("net user \"%s\"", currentProcUserName));
String line = result.split("\n")[22];
String group = Pattern.compile("\\s+").split(line)[1];
if (group.charAt(0) == '*') {
return group.substring(1);
} else {
return group;
}
} else {
String result = exeCmd("groups");
if (StringUtils.isNotEmpty(result)) {
String[] groupInfo = result.split(" ");
return groupInfo[0];
}
}
return null;
}
/**
* Execute the corresponding command of Linux or Windows
*
* @param command command
* @return result of execute command
* @throws IOException errors
*/
public static String exeCmd(String command) throws IOException {
BufferedReader br = null;
try {
Process p = Runtime.getRuntime().exec(command);
br = new BufferedReader(new InputStreamReader(p.getInputStream()));
String line;
StringBuilder sb = new StringBuilder();
while ((line = br.readLine()) != null) {
sb.append(line + "\n");
}
return sb.toString();
} finally {
if (br != null) {
try {
br.close();
} catch (Exception e) {
logger.error(e.getMessage(), e);
}
}
}
}
/**
* Execute the shell
* @param command command
* @return result of execute the shell
* @throws IOException errors
*/
public static String exeShell(String command) throws IOException {
return ShellExecutor.execCommand(command);
}
/**
* get process id
* @return process id
*/
public static int getProcessID() {
RuntimeMXBean runtimeMXBean = ManagementFactory.getRuntimeMXBean();
return Integer.parseInt(runtimeMXBean.getName().split("@")[0]);
}
/**
* get local host
* @return host
*/
public static String getHost(){
try {
return InetAddress.getLocalHost().getHostAddress();
} catch (UnknownHostException e) {
logger.error(e.getMessage(),e);
}
return null;
}
/**
* whether is macOS
* @return true if mac
*/
public static boolean isMacOS() {
String os = System.getProperty("os.name");
return os.startsWith("Mac");
}
/**
* whether is windows
* @return true if windows
*/
public static boolean isWindows() {
String os = System.getProperty("os.name");
return os.startsWith("Windows");
}
/**
* check memory and cpu usage
* @return check memory and cpu usage
*/
public static Boolean checkResource(double systemCpuLoad, double systemReservedMemory){
        // compare the current load average and available memory against the configured thresholds
        double loadAverage = OSUtils.loadAverage();
        double availablePhysicalMemorySize = OSUtils.availablePhysicalMemorySize();
        if(loadAverage > systemCpuLoad || availablePhysicalMemorySize < systemReservedMemory){
            logger.warn("load average {} is above the cpu load limit or available physical memory {}G is below the reserved memory, refuse new tasks", loadAverage, availablePhysicalMemorySize);
            return false;
}else{
return true;
}
}
/**
* check memory and cpu usage
* @param conf conf
* @param isMaster is master
* @return check memory and cpu usage
*/
public static Boolean checkResource(Configuration conf, Boolean isMaster){
double systemCpuLoad;
double systemReservedMemory;
if(Boolean.TRUE.equals(isMaster)){
systemCpuLoad = conf.getDouble(Constants.MASTER_MAX_CPULOAD_AVG, Constants.DEFAULT_MASTER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.MASTER_RESERVED_MEMORY, Constants.DEFAULT_MASTER_RESERVED_MEMORY);
}else{
systemCpuLoad = conf.getDouble(Constants.WORKER_MAX_CPULOAD_AVG, Constants.DEFAULT_WORKER_CPU_LOAD);
systemReservedMemory = conf.getDouble(Constants.WORKER_RESERVED_MEMORY, Constants.DEFAULT_WORKER_RESERVED_MEMORY);
}
return checkResource(systemCpuLoad,systemReservedMemory);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,111 | [BUG] sun.misc.JavaIOFileDescriptorAccess is not portable | The current implementation relies on `sun.misc.JavaIOFileDescriptorAccess`, which is only accessible on Oracle JDK 8.
Basically all we need is to get and set the underlying handle field of `FileDescriptor`, and we can do that directly with reflection. For example:
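A minimal sketch of that reflection approach; the class name `FileDescriptorHandleAccess` is made up for illustration, and it assumes the Windows `java.io.FileDescriptor` stores its native handle in a `long` field named `handle`, which is a JDK implementation detail rather than a documented API:

```java
import java.io.FileDescriptor;
import java.lang.reflect.Field;

/** Sketch only: read/write FileDescriptor's native handle without sun.misc APIs. */
public final class FileDescriptorHandleAccess {

    private static final Field HANDLE_FIELD;

    static {
        try {
            // on Windows JDKs the native handle lives in a long field named "handle"
            HANDLE_FIELD = FileDescriptor.class.getDeclaredField("handle");
            HANDLE_FIELD.setAccessible(true);
        } catch (NoSuchFieldException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    private FileDescriptorHandleAccess() { }

    public static long getHandle(FileDescriptor fd) {
        try {
            return HANDLE_FIELD.getLong(fd);
        } catch (IllegalAccessException e) {
            throw new IllegalStateException("cannot read FileDescriptor.handle", e);
        }
    }

    public static void setHandle(FileDescriptor fd, long handle) {
        try {
            HANDLE_FIELD.setLong(fd, handle);
        } catch (IllegalAccessException e) {
            throw new IllegalStateException("cannot write FileDescriptor.handle", e);
        }
    }
}
```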
That said, I question whether we really need to introduce `ProcessImplForWin32` at all; maybe there is a better way to let the worker server run bat scripts. | https://github.com/apache/dolphinscheduler/issues/2111 | https://github.com/apache/dolphinscheduler/pull/2113 | 450a1f56fc73f088fce89a343a0b008706f2088c | 9224b49b58b756d22c75d8929108f716283282b4 | "2020-03-08T05:09:46Z" | java | "2020-03-09T11:06:41Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/process/ProcessImplForWin32.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.utils.process;
import com.sun.jna.Pointer;
import com.sun.jna.platform.win32.*;
import com.sun.jna.ptr.IntByReference;
import sun.security.action.GetPropertyAction;
import java.io.*;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.sun.jna.platform.win32.WinBase.STILL_ACTIVE;
public class ProcessImplForWin32 extends Process {
private static final sun.misc.JavaIOFileDescriptorAccess fdAccess
= sun.misc.SharedSecrets.getJavaIOFileDescriptorAccess();
private static final int PIPE_SIZE = 4096 + 24;
private static final int HANDLE_STORAGE_SIZE = 6;
private static final int OFFSET_READ = 0;
private static final int OFFSET_WRITE = 1;
private static final WinNT.HANDLE JAVA_INVALID_HANDLE_VALUE = new WinNT.HANDLE(Pointer.createConstant(-1));
/**
* Open a file for writing. If {@code append} is {@code true} then the file
* is opened for atomic append directly and a FileOutputStream constructed
* with the resulting handle. This is because a FileOutputStream created
* to append to a file does not open the file in a manner that guarantees
* that writes by the child process will be atomic.
*/
private static FileOutputStream newFileOutputStream(File f, boolean append)
throws IOException
{
if (append) {
String path = f.getPath();
SecurityManager sm = System.getSecurityManager();
if (sm != null)
sm.checkWrite(path);
long handle = openForAtomicAppend(path);
final FileDescriptor fd = new FileDescriptor();
fdAccess.setHandle(fd, handle);
return AccessController.doPrivileged(
new PrivilegedAction<FileOutputStream>() {
public FileOutputStream run() {
return new FileOutputStream(fd);
}
}
);
} else {
return new FileOutputStream(f);
}
}
// System-dependent portion of ProcessBuilderForWindows.start()
static Process start(String username,
String password,
String cmdarray[],
java.util.Map<String,String> environment,
String dir,
ProcessBuilderForWin32.Redirect[] redirects,
boolean redirectErrorStream)
throws IOException
{
String envblock = ProcessEnvironmentForWin32.toEnvironmentBlock(environment);
FileInputStream f0 = null;
FileOutputStream f1 = null;
FileOutputStream f2 = null;
try {
long[] stdHandles;
if (redirects == null) {
stdHandles = new long[] { -1L, -1L, -1L };
} else {
stdHandles = new long[3];
if (redirects[0] == ProcessBuilderForWin32.Redirect.PIPE)
stdHandles[0] = -1L;
else if (redirects[0] == ProcessBuilderForWin32.Redirect.INHERIT)
stdHandles[0] = fdAccess.getHandle(FileDescriptor.in);
else {
f0 = new FileInputStream(redirects[0].file());
stdHandles[0] = fdAccess.getHandle(f0.getFD());
}
if (redirects[1] == ProcessBuilderForWin32.Redirect.PIPE)
stdHandles[1] = -1L;
else if (redirects[1] == ProcessBuilderForWin32.Redirect.INHERIT)
stdHandles[1] = fdAccess.getHandle(FileDescriptor.out);
else {
f1 = newFileOutputStream(redirects[1].file(),
redirects[1].append());
stdHandles[1] = fdAccess.getHandle(f1.getFD());
}
if (redirects[2] == ProcessBuilderForWin32.Redirect.PIPE)
stdHandles[2] = -1L;
else if (redirects[2] == ProcessBuilderForWin32.Redirect.INHERIT)
stdHandles[2] = fdAccess.getHandle(FileDescriptor.err);
else {
f2 = newFileOutputStream(redirects[2].file(),
redirects[2].append());
stdHandles[2] = fdAccess.getHandle(f2.getFD());
}
}
return new ProcessImplForWin32(username, password, cmdarray, envblock, dir, stdHandles, redirectErrorStream);
} finally {
// In theory, close() can throw IOException
// (although it is rather unlikely to happen here)
try { if (f0 != null) f0.close(); }
finally {
try { if (f1 != null) f1.close(); }
finally { if (f2 != null) f2.close(); }
}
}
}
private static class LazyPattern {
// Escape-support version:
// "(\")((?:\\\\\\1|.)+?)\\1|([^\\s\"]+)";
private static final Pattern PATTERN =
Pattern.compile("[^\\s\"]+|\"[^\"]*\"");
};
/* Parses the command string parameter into the executable name and
* program arguments.
*
* The command string is broken into tokens. The token separator is a space
* or quota character. The space inside quotation is not a token separator.
* There are no escape sequences.
*/
private static String[] getTokensFromCommand(String command) {
ArrayList<String> matchList = new ArrayList<>(8);
Matcher regexMatcher = ProcessImplForWin32.LazyPattern.PATTERN.matcher(command);
while (regexMatcher.find())
matchList.add(regexMatcher.group());
return matchList.toArray(new String[matchList.size()]);
}
private static final int VERIFICATION_CMD_BAT = 0;
private static final int VERIFICATION_WIN32 = 1;
private static final int VERIFICATION_WIN32_SAFE = 2; // inside quotes not allowed
private static final int VERIFICATION_LEGACY = 3;
// See Command shell overview for documentation of special characters.
// https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-xp/bb490954(v=technet.10)
private static final char ESCAPE_VERIFICATION[][] = {
// We guarantee the only command file execution for implicit [cmd.exe] run.
// http://technet.microsoft.com/en-us/library/bb490954.aspx
{' ', '\t', '<', '>', '&', '|', '^'},
{' ', '\t', '<', '>'},
{' ', '\t', '<', '>'},
{' ', '\t'}
};
private static String createCommandLine(int verificationType,
final String executablePath,
final String cmd[])
{
StringBuilder cmdbuf = new StringBuilder(80);
cmdbuf.append(executablePath);
for (int i = 1; i < cmd.length; ++i) {
cmdbuf.append(' ');
String s = cmd[i];
if (needsEscaping(verificationType, s)) {
cmdbuf.append('"');
if (verificationType == VERIFICATION_WIN32_SAFE) {
// Insert the argument, adding '\' to quote any interior quotes
int length = s.length();
for (int j = 0; j < length; j++) {
char c = s.charAt(j);
if (c == DOUBLEQUOTE) {
int count = countLeadingBackslash(verificationType, s, j);
while (count-- > 0) {
cmdbuf.append(BACKSLASH); // double the number of backslashes
}
cmdbuf.append(BACKSLASH); // backslash to quote the quote
}
cmdbuf.append(c);
}
} else {
cmdbuf.append(s);
}
// The code protects the [java.exe] and console command line
// parser, that interprets the [\"] combination as an escape
// sequence for the ["] char.
// http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
//
// If the argument is an FS path, doubling of the tail [\]
// char is not a problem for non-console applications.
//
// The [\"] sequence is not an escape sequence for the [cmd.exe]
// command line parser. The case of the [""] tail escape
// sequence could not be realized due to the argument validation
// procedure.
int count = countLeadingBackslash(verificationType, s, s.length());
while (count-- > 0) {
cmdbuf.append(BACKSLASH); // double the number of backslashes
}
cmdbuf.append('"');
} else {
cmdbuf.append(s);
}
}
return cmdbuf.toString();
}
/**
* Return the argument without quotes (1st and last) if present, else the arg.
* @param str a string
* @return the string without 1st and last quotes
*/
private static String unQuote(String str) {
int len = str.length();
return (len >= 2 && str.charAt(0) == DOUBLEQUOTE && str.charAt(len - 1) == DOUBLEQUOTE)
? str.substring(1, len - 1)
: str;
}
private static boolean needsEscaping(int verificationType, String arg) {
// Switch off MS heuristic for internal ["].
// Please, use the explicit [cmd.exe] call
// if you need the internal ["].
// Example: "cmd.exe", "/C", "Extended_MS_Syntax"
// For [.exe] or [.com] file the unpaired/internal ["]
// in the argument is not a problem.
String unquotedArg = unQuote(arg);
boolean argIsQuoted = !arg.equals(unquotedArg);
boolean embeddedQuote = unquotedArg.indexOf(DOUBLEQUOTE) >= 0;
switch (verificationType) {
case VERIFICATION_CMD_BAT:
if (embeddedQuote) {
throw new IllegalArgumentException("Argument has embedded quote, " +
"use the explicit CMD.EXE call.");
}
break; // break determine whether to quote
case VERIFICATION_WIN32_SAFE:
if (argIsQuoted && embeddedQuote) {
throw new IllegalArgumentException("Malformed argument has embedded quote: "
+ unquotedArg);
}
break;
default:
break;
}
if (!argIsQuoted) {
char testEscape[] = ESCAPE_VERIFICATION[verificationType];
for (int i = 0; i < testEscape.length; ++i) {
if (arg.indexOf(testEscape[i]) >= 0) {
return true;
}
}
}
return false;
}
private static String getExecutablePath(String path)
throws IOException
{
String name = unQuote(path);
if (name.indexOf(DOUBLEQUOTE) >= 0) {
throw new IllegalArgumentException("Executable name has embedded quote, " +
"split the arguments: " + name);
}
// Win32 CreateProcess requires path to be normalized
File fileToRun = new File(name);
// From the [CreateProcess] function documentation:
//
// "If the file name does not contain an extension, .exe is appended.
// Therefore, if the file name extension is .com, this parameter
// must include the .com extension. If the file name ends in
// a period (.) with no extension, or if the file name contains a path,
// .exe is not appended."
//
// "If the file name !does not contain a directory path!,
// the system searches for the executable file in the following
// sequence:..."
//
// In practice ANY non-existent path is extended by [.exe] extension
// in the [CreateProcess] function with the only exception:
// the path ends by (.)
return fileToRun.getPath();
}
/**
* An executable is any program that is an EXE or does not have an extension
* and the Windows createProcess will be looking for .exe.
* The comparison is case insensitive based on the name.
* @param executablePath the executable file
* @return true if the path ends in .exe or does not have an extension.
*/
private boolean isExe(String executablePath) {
File file = new File(executablePath);
String upName = file.getName().toUpperCase(Locale.ROOT);
return (upName.endsWith(".EXE") || upName.indexOf('.') < 0);
}
// Old version that can be bypassed
private boolean isShellFile(String executablePath) {
String upPath = executablePath.toUpperCase();
return (upPath.endsWith(".CMD") || upPath.endsWith(".BAT"));
}
private String quoteString(String arg) {
StringBuilder argbuf = new StringBuilder(arg.length() + 2);
return argbuf.append('"').append(arg).append('"').toString();
}
// Count backslashes before start index of string.
// .bat files don't include backslashes as part of the quote
private static int countLeadingBackslash(int verificationType,
CharSequence input, int start) {
if (verificationType == VERIFICATION_CMD_BAT)
return 0;
int j;
for (j = start - 1; j >= 0 && input.charAt(j) == BACKSLASH; j--) {
// just scanning backwards
}
return (start - 1) - j; // number of BACKSLASHES
}
private static final char DOUBLEQUOTE = '\"';
private static final char BACKSLASH = '\\';
private WinNT.HANDLE handle;
private OutputStream stdin_stream;
private InputStream stdout_stream;
private InputStream stderr_stream;
private ProcessImplForWin32(
String username,
String password,
String cmd[],
final String envblock,
final String path,
final long[] stdHandles,
final boolean redirectErrorStream)
throws IOException
{
String cmdstr;
final SecurityManager security = System.getSecurityManager();
GetPropertyAction action = new GetPropertyAction("jdk.lang.Process.allowAmbiguousCommands",
(security == null) ? "true" : "false");
final boolean allowAmbiguousCommands = !"false".equalsIgnoreCase(action.run());
if (allowAmbiguousCommands && security == null) {
// Legacy mode.
// Normalize path if possible.
String executablePath = new File(cmd[0]).getPath();
// No worry about internal, unpaired ["], and redirection/piping.
if (needsEscaping(VERIFICATION_LEGACY, executablePath) )
executablePath = quoteString(executablePath);
cmdstr = createCommandLine(
//legacy mode doesn't worry about extended verification
VERIFICATION_LEGACY,
executablePath,
cmd);
} else {
String executablePath;
try {
executablePath = getExecutablePath(cmd[0]);
} catch (IllegalArgumentException e) {
// Workaround for the calls like
// Runtime.getRuntime().exec("\"C:\\Program Files\\foo\" bar")
// No chance to avoid CMD/BAT injection, except to do the work
// right from the beginning. Otherwise we have too many corner
// cases from
// Runtime.getRuntime().exec(String[] cmd [, ...])
// calls with internal ["] and escape sequences.
// Restore original command line.
StringBuilder join = new StringBuilder();
// terminal space in command line is ok
for (String s : cmd)
join.append(s).append(' ');
// Parse the command line again.
cmd = getTokensFromCommand(join.toString());
executablePath = getExecutablePath(cmd[0]);
// Check new executable name once more
if (security != null)
security.checkExec(executablePath);
}
// Quotation protects from interpretation of the [path] argument as
// start of longer path with spaces. Quotation has no influence to
// [.exe] extension heuristic.
boolean isShell = allowAmbiguousCommands ? isShellFile(executablePath)
: !isExe(executablePath);
cmdstr = createCommandLine(
// We need the extended verification procedures
isShell ? VERIFICATION_CMD_BAT
: (allowAmbiguousCommands ? VERIFICATION_WIN32 : VERIFICATION_WIN32_SAFE),
quoteString(executablePath),
cmd);
}
handle = create(username, password, cmdstr, envblock, path, stdHandles, redirectErrorStream);
AccessController.doPrivileged(
new PrivilegedAction<Void>() {
public Void run() {
if (stdHandles[0] == -1L)
stdin_stream = ProcessBuilderForWin32.NullOutputStream.INSTANCE;
else {
FileDescriptor stdin_fd = new FileDescriptor();
fdAccess.setHandle(stdin_fd, stdHandles[0]);
stdin_stream = new BufferedOutputStream(
new FileOutputStream(stdin_fd));
}
if (stdHandles[1] == -1L)
stdout_stream = ProcessBuilderForWin32.NullInputStream.INSTANCE;
else {
FileDescriptor stdout_fd = new FileDescriptor();
fdAccess.setHandle(stdout_fd, stdHandles[1]);
stdout_stream = new BufferedInputStream(
new FileInputStream(stdout_fd));
}
if (stdHandles[2] == -1L)
stderr_stream = ProcessBuilderForWin32.NullInputStream.INSTANCE;
else {
FileDescriptor stderr_fd = new FileDescriptor();
fdAccess.setHandle(stderr_fd, stdHandles[2]);
stderr_stream = new FileInputStream(stderr_fd);
}
return null; }});
}
public OutputStream getOutputStream() {
return stdin_stream;
}
public InputStream getInputStream() {
return stdout_stream;
}
public InputStream getErrorStream() {
return stderr_stream;
}
protected void finalize() {
closeHandle(handle);
}
public int exitValue() {
int exitCode = getExitCodeProcess(handle);
if (exitCode == STILL_ACTIVE)
throw new IllegalThreadStateException("process has not exited");
return exitCode;
}
public int waitFor() throws InterruptedException {
waitForInterruptibly(handle);
if (Thread.interrupted())
throw new InterruptedException();
return exitValue();
}
@Override
public boolean waitFor(long timeout, TimeUnit unit)
throws InterruptedException
{
if (getExitCodeProcess(handle) != STILL_ACTIVE) return true;
if (timeout <= 0) return false;
long remainingNanos = unit.toNanos(timeout);
long deadline = System.nanoTime() + remainingNanos ;
do {
// Round up to next millisecond
long msTimeout = TimeUnit.NANOSECONDS.toMillis(remainingNanos + 999_999L);
waitForTimeoutInterruptibly(handle, msTimeout);
if (Thread.interrupted())
throw new InterruptedException();
if (getExitCodeProcess(handle) != STILL_ACTIVE) {
return true;
}
remainingNanos = deadline - System.nanoTime();
} while (remainingNanos > 0);
return (getExitCodeProcess(handle) != STILL_ACTIVE);
}
public void destroy() { terminateProcess(handle); }
public Process destroyForcibly() {
destroy();
return this;
}
public boolean isAlive() {
return isProcessAlive(handle);
}
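    /**
     * If the Java side already supplied a handle, pass it through as the child's standard handle;
     * otherwise create an anonymous pipe, hand the child the end selected by {@code offset} and keep
     * the opposite end for this process. The child's end is always marked inheritable.
     */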
private static boolean initHolder(WinNT.HANDLEByReference pjhandles,
WinNT.HANDLEByReference[] pipe,
int offset,
WinNT.HANDLEByReference phStd) {
if (!pjhandles.getValue().equals(JAVA_INVALID_HANDLE_VALUE)) {
phStd.setValue(pjhandles.getValue());
pjhandles.setValue(JAVA_INVALID_HANDLE_VALUE);
} else {
if (!Kernel32.INSTANCE.CreatePipe(pipe[0], pipe[1], null, PIPE_SIZE)) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
} else {
WinNT.HANDLE thisProcessEnd = offset == OFFSET_READ ? pipe[1].getValue() : pipe[0].getValue();
phStd.setValue(pipe[offset].getValue());
pjhandles.setValue(thisProcessEnd);
}
}
Kernel32.INSTANCE.SetHandleInformation(phStd.getValue(), Kernel32.HANDLE_FLAG_INHERIT, Kernel32.HANDLE_FLAG_INHERIT);
return true;
}
private static void releaseHolder(boolean complete, WinNT.HANDLEByReference[] pipe, int offset) {
closeHandle(pipe[offset].getValue());
if (complete) {
closeHandle(pipe[offset == OFFSET_READ ? OFFSET_WRITE : OFFSET_READ].getValue());
}
}
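    /**
     * Clear the inherit flag on the parent's own standard handles and on the handles passed in from the
     * Java side, so the child only inherits the handles that initHolder explicitly marks inheritable;
     * restoreIOEHandleState turns inheritance back on for the same handles once the child has been created.
     */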
private static void prepareIOEHandleState(WinNT.HANDLE[] stdIOE, Boolean[] inherit) {
for(int i = 0; i < HANDLE_STORAGE_SIZE; ++i) {
WinNT.HANDLE hstd = stdIOE[i];
if (!Kernel32.INVALID_HANDLE_VALUE.equals(hstd)) {
inherit[i] = Boolean.TRUE;
Kernel32.INSTANCE.SetHandleInformation(hstd, Kernel32.HANDLE_FLAG_INHERIT, 0);
}
}
}
private static void restoreIOEHandleState(WinNT.HANDLE[] stdIOE, Boolean[] inherit) {
for (int i = HANDLE_STORAGE_SIZE - 1; i >= 0; --i) {
if (!Kernel32.INVALID_HANDLE_VALUE.equals(stdIOE[i])) {
Kernel32.INSTANCE.SetHandleInformation(stdIOE[i], Kernel32.HANDLE_FLAG_INHERIT, inherit[i] ? Kernel32.HANDLE_FLAG_INHERIT : 0);
}
}
}
private static WinNT.HANDLE processCreate(String username,
String password,
String cmd,
final String envblock,
final String path,
final WinNT.HANDLEByReference[] stdHandles,
final boolean redirectErrorStream) {
WinNT.HANDLE ret = new WinNT.HANDLE(Pointer.createConstant(0));
WinNT.HANDLE[] stdIOE = new WinNT.HANDLE[] {
Kernel32.INVALID_HANDLE_VALUE, Kernel32.INVALID_HANDLE_VALUE, Kernel32.INVALID_HANDLE_VALUE,
stdHandles[0].getValue(), stdHandles[1].getValue(), stdHandles[2].getValue()
};
stdIOE[0] = Kernel32.INSTANCE.GetStdHandle(Kernel32.STD_INPUT_HANDLE);
stdIOE[1] = Kernel32.INSTANCE.GetStdHandle(Kernel32.STD_OUTPUT_HANDLE);
stdIOE[2] = Kernel32.INSTANCE.GetStdHandle(Kernel32.STD_ERROR_HANDLE);
Boolean[] inherit = new Boolean[] {
Boolean.FALSE, Boolean.FALSE, Boolean.FALSE,
Boolean.FALSE, Boolean.FALSE, Boolean.FALSE
};
prepareIOEHandleState(stdIOE, inherit);
// input
WinNT.HANDLEByReference hStdInput = new WinNT.HANDLEByReference();
WinNT.HANDLEByReference[] pipeIn = new WinNT.HANDLEByReference[] {
new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE), new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE) };
// output
WinNT.HANDLEByReference hStdOutput = new WinNT.HANDLEByReference();
WinNT.HANDLEByReference[] pipeOut = new WinNT.HANDLEByReference[] {
new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE), new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE) };
// error
WinNT.HANDLEByReference hStdError = new WinNT.HANDLEByReference();
WinNT.HANDLEByReference[] pipeError = new WinNT.HANDLEByReference[] {
new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE), new WinNT.HANDLEByReference(Kernel32.INVALID_HANDLE_VALUE) };
boolean success;
if (initHolder(stdHandles[0], pipeIn, OFFSET_READ, hStdInput)) {
if (initHolder(stdHandles[1], pipeOut, OFFSET_WRITE, hStdOutput)) {
WinBase.STARTUPINFO si = new WinBase.STARTUPINFO();
si.hStdInput = hStdInput.getValue();
si.hStdOutput = hStdOutput.getValue();
if (redirectErrorStream) {
si.hStdError = si.hStdOutput;
stdHandles[2].setValue(JAVA_INVALID_HANDLE_VALUE);
success = true;
} else {
success = initHolder(stdHandles[2], pipeError, OFFSET_WRITE, hStdError);
si.hStdError = hStdError.getValue();
}
if (success) {
WTypes.LPSTR lpEnvironment = envblock == null ? new WTypes.LPSTR() : new WTypes.LPSTR(envblock);
Kernel32.PROCESS_INFORMATION pi = new WinBase.PROCESS_INFORMATION();
si.dwFlags = Kernel32.STARTF_USESTDHANDLES;
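                    // launch the child in the given user's logon session (this is how a task is run under the
                    // tenant account on Windows); CREATE_NO_WINDOW keeps the child from opening a console window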
if (!Advapi32.INSTANCE.CreateProcessWithLogonW(
username
, null
, password
, Advapi32.LOGON_WITH_PROFILE
, null
, cmd
, Kernel32.CREATE_NO_WINDOW
, lpEnvironment.getPointer()
, path
, si
, pi)) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
} else {
closeHandle(pi.hThread);
ret = pi.hProcess;
}
}
releaseHolder(ret.getPointer().equals(Pointer.createConstant(0)), pipeError, OFFSET_WRITE);
releaseHolder(ret.getPointer().equals(Pointer.createConstant(0)), pipeOut, OFFSET_WRITE);
}
releaseHolder(ret.getPointer().equals(Pointer.createConstant(0)), pipeIn, OFFSET_READ);
}
restoreIOEHandleState(stdIOE, inherit);
return ret;
}
private static synchronized WinNT.HANDLE create(String username,
String password,
String cmd,
final String envblock,
final String path,
final long[] stdHandles,
final boolean redirectErrorStream) {
WinNT.HANDLE ret = new WinNT.HANDLE(Pointer.createConstant(0));
WinNT.HANDLEByReference[] handles = new WinNT.HANDLEByReference[stdHandles.length];
for (int i = 0; i < stdHandles.length; i++) {
handles[i] = new WinNT.HANDLEByReference(new WinNT.HANDLE(Pointer.createConstant(stdHandles[i])));
}
if (cmd != null) {
if (username != null && password != null) {
ret = processCreate(username, password, cmd, envblock, path, handles, redirectErrorStream);
}
}
for (int i = 0; i < stdHandles.length; i++) {
stdHandles[i] = handles[i].getPointer().getLong(0);
}
return ret;
}
private static int getExitCodeProcess(WinNT.HANDLE handle) {
IntByReference exitStatus = new IntByReference();
if (!Kernel32.INSTANCE.GetExitCodeProcess(handle, exitStatus)) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
}
return exitStatus.getValue();
}
private static void terminateProcess(WinNT.HANDLE handle) {
Kernel32.INSTANCE.TerminateProcess(handle, 1);
}
private static boolean isProcessAlive(WinNT.HANDLE handle) {
IntByReference exitStatus = new IntByReference();
Kernel32.INSTANCE.GetExitCodeProcess(handle, exitStatus);
return exitStatus.getValue() == STILL_ACTIVE;
}
private static void closeHandle(WinNT.HANDLE handle) {
Kernel32Util.closeHandle(handle);
}
/**
* Opens a file for atomic append. The file is created if it doesn't
* already exist.
*
* @param path the file to open or create
* @return the native HANDLE
*/
private static long openForAtomicAppend(String path) throws IOException {
int access = Kernel32.GENERIC_READ | Kernel32.GENERIC_WRITE;
int sharing = Kernel32.FILE_SHARE_READ | Kernel32.FILE_SHARE_WRITE;
int disposition = Kernel32.OPEN_ALWAYS;
int flagsAndAttributes = Kernel32.FILE_ATTRIBUTE_NORMAL;
if (path == null || path.isEmpty()) {
return -1;
} else {
WinNT.HANDLE handle = Kernel32.INSTANCE.CreateFile(path, access, sharing, null, disposition, flagsAndAttributes, null);
if (handle == Kernel32.INVALID_HANDLE_VALUE) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
}
return handle.getPointer().getLong(0);
}
}
private static void waitForInterruptibly(WinNT.HANDLE handle) {
int result = Kernel32.INSTANCE.WaitForMultipleObjects(1, new WinNT.HANDLE[]{handle}, false, Kernel32.INFINITE);
if (result == Kernel32.WAIT_FAILED) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
}
}
private static void waitForTimeoutInterruptibly(WinNT.HANDLE handle, long timeout) {
int result = Kernel32.INSTANCE.WaitForMultipleObjects(1, new WinNT.HANDLE[]{handle}, false, (int) timeout);
if (result == Kernel32.WAIT_FAILED) {
throw new Win32Exception(Kernel32.INSTANCE.GetLastError());
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,111 | [BUG] sun.misc.JavaIOFileDescriptorAccess is not portable | The current implementation relies on `sun.misc.JavaIOFileDescriptorAccess`, which is only accessible on Oracle JDK 8.
Basically all we need is to get and set the underlying handle field of `FileDescriptor`, and we can do that directly with reflection.
That said, I question whether we really need to introduce `ProcessImplForWin32` at all; maybe there is a better way to let the worker server run bat scripts. | https://github.com/apache/dolphinscheduler/issues/2111 | https://github.com/apache/dolphinscheduler/pull/2113 | 450a1f56fc73f088fce89a343a0b008706f2088c | 9224b49b58b756d22c75d8929108f716283282b4 | "2020-03-08T05:09:46Z" | java | "2020-03-09T11:06:41Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.shell;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.OSUtils;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.ApplicationContext;
import java.util.Date;
/**
* shell task test
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest(OSUtils.class)
public class ShellTaskTest {
private static final Logger logger = LoggerFactory.getLogger(ShellTaskTest.class);
private ShellTask shellTask;
private ProcessService processService;
private ShellCommandExecutor shellCommandExecutor;
private ApplicationContext applicationContext;
@Before
public void before() throws Exception {
PowerMockito.mockStatic(OSUtils.class);
processService = PowerMockito.mock(ProcessService.class);
shellCommandExecutor = PowerMockito.mock(ShellCommandExecutor.class);
applicationContext = PowerMockito.mock(ApplicationContext.class);
SpringApplicationContext springApplicationContext = new SpringApplicationContext();
springApplicationContext.setApplicationContext(applicationContext);
PowerMockito.when(applicationContext.getBean(ProcessService.class)).thenReturn(processService);
TaskProps props = new TaskProps();
props.setTaskDir("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstId(1);
props.setTenantCode("1");
props.setEnvFile(".dolphinscheduler_env.sh");
props.setTaskStartTime(new Date());
props.setTaskTimeout(0);
props.setTaskParams("{\"rawScript\": \" echo 'hello world!'\"}");
shellTask = new ShellTask(props, logger);
shellTask.init();
PowerMockito.when(processService.findDataSourceById(1)).thenReturn(getDataSource());
PowerMockito.when(processService.findDataSourceById(2)).thenReturn(getDataSource());
PowerMockito.when(processService.findProcessInstanceByTaskId(1)).thenReturn(getProcessInstance());
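        // path of the script the shell task is expected to generate: <taskDir>/<taskAppId>_node.sh (.bat on Windows)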
String fileName = String.format("%s/%s_node.%s", props.getTaskDir(), props.getTaskAppId(), OSUtils.isWindows() ? "bat" : "sh");
PowerMockito.when(shellCommandExecutor.run(fileName, processService)).thenReturn(0);
}
private DataSource getDataSource() {
DataSource dataSource = new DataSource();
dataSource.setType(DbType.MYSQL);
dataSource.setConnectionParams(
"{\"user\":\"root\",\"password\":\"123456\",\"address\":\"jdbc:mysql://127.0.0.1:3306\",\"database\":\"test\",\"jdbcUrl\":\"jdbc:mysql://127.0.0.1:3306/test\"}");
dataSource.setUserId(1);
return dataSource;
}
private ProcessInstance getProcessInstance() {
ProcessInstance processInstance = new ProcessInstance();
processInstance.setCommandType(CommandType.START_PROCESS);
processInstance.setScheduleTime(new Date());
return processInstance;
}
@After
public void after() {}
/**
* Method: ShellTask()
*/
@Test
public void testShellTask()
throws Exception {
TaskProps props = new TaskProps();
props.setTaskDir("/tmp");
props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
props.setTaskInstId(1);
props.setTenantCode("1");
ShellTask shellTaskTest = new ShellTask(props, logger);
Assert.assertNotNull(shellTaskTest);
}
/**
* Method: init for Unix-like
*/
@Test
public void testInitForUnix() {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(false);
shellTask.init();
Assert.assertTrue(true);
} catch (Error | Exception e) {
logger.error(e.getMessage());
}
}
/**
* Method: init for Windows
*/
@Test
public void testInitForWindows() {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(true);
shellTask.init();
Assert.assertTrue(true);
} catch (Error | Exception e) {
logger.error(e.getMessage());
}
}
/**
* Method: handle() for Unix-like
*/
@Test
public void testHandleForUnix() throws Exception {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(false);
shellTask.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")
&& !System.getProperty("os.name").startsWith("Windows")) {
logger.error(e.getMessage());
}
}
}
/**
* Method: handle() for Windows
*/
@Test
public void testHandleForWindows() throws Exception {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(true);
shellTask.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")) {
logger.error(e.getMessage());
}
}
}
/**
* Method: cancelApplication()
*/
@Test
public void testCancelApplication() throws Exception {
try {
shellTask.cancelApplication(true);
Assert.assertTrue(true);
} catch (Error | Exception e) {
logger.error(e.getMessage());
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,178 | [BUG] SqlTask kerberos authentication process | *For better global communication, please give priority to using English description, thx! *
**Describe the bug**
When the keytab is expired or deactivated, tasks that do not require kerberos authentication at all also fail. Something is clearly wrong with the kerberos authentication process: it should only be performed when it is actually needed.
**Which version of Dolphin Scheduler:**
- [1.2.1-release]
**Requirement or improvement**
- Change the CommonUtils.loadKerberosConf() call scope so that it only runs when it is actually needed (see the sketch below);
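A sketch of one way to narrow that scope inside `SqlTask.executeFuncAndSql`, shown as a fragment that reuses the fields and static imports the method already has (`dataSource`, `baseDataSource`, `sqlParameters`, `HIVE`, `USER`, `PASSWORD`, `SEMICOLON`, `HIVE_CONF`); the choice of which data source types actually need kerberos is an assumption here and is up to the final fix:

```java
// inside executeFuncAndSql(...): only a kerberized data source should touch the ticket cache,
// so plain JDBC tasks no longer fail when the keytab is expired or deactivated
if (HIVE == dataSource.getType()) {
    CommonUtils.loadKerberosConf();

    Properties paramProp = new Properties();
    paramProp.setProperty(USER, baseDataSource.getUser());
    paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
    Map<String, String> connParamMap =
            CollectionUtils.stringToMap(sqlParameters.getConnParams(), SEMICOLON, HIVE_CONF);
    paramProp.putAll(connParamMap);
    connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(), paramProp);
} else {
    connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
            baseDataSource.getUser(), baseDataSource.getPassword());
}
```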
| https://github.com/apache/dolphinscheduler/issues/2178 | https://github.com/apache/dolphinscheduler/pull/2321 | a851168a350e300becb3452e91871220ffa3a5fc | d4735334a1986fedd5b92d185dc6d46be93cb071 | "2020-03-14T11:25:36Z" | java | "2020-03-28T09:42:33Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.sql;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.serializer.SerializerFeature;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlBinds;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.datasource.BaseDataSource;
import org.apache.dolphinscheduler.dao.datasource.DataSourceFactory;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.UDFUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import java.sql.*;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
import static org.apache.dolphinscheduler.common.enums.DbType.HIVE;
/**
* sql task
*/
public class SqlTask extends AbstractTask {
/**
* sql parameters
*/
private SqlParameters sqlParameters;
/**
* process service
*/
private ProcessService processService;
/**
* alert dao
*/
private AlertDao alertDao;
/**
* datasource
*/
private DataSource dataSource;
/**
* base datasource
*/
private BaseDataSource baseDataSource;
public SqlTask(TaskProps taskProps, Logger logger) {
super(taskProps, logger);
logger.info("sql task params {}", taskProps.getTaskParams());
this.sqlParameters = JSON.parseObject(taskProps.getTaskParams(), SqlParameters.class);
if (!sqlParameters.checkParameters()) {
throw new RuntimeException("sql task params is not valid");
}
this.processService = SpringApplicationContext.getBean(ProcessService.class);
this.alertDao = SpringApplicationContext.getBean(AlertDao.class);
}
@Override
public void handle() throws Exception {
// set the name of the current thread
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskProps.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
logger.info("Full sql parameters: {}", sqlParameters);
logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {}",
sqlParameters.getType(),
sqlParameters.getDatasource(),
sqlParameters.getSql(),
sqlParameters.getLocalParams(),
sqlParameters.getUdfs(),
sqlParameters.getShowType(),
sqlParameters.getConnParams());
// not set data source
if (sqlParameters.getDatasource() == 0){
logger.error("datasource id not exists");
exitStatusCode = -1;
return;
}
dataSource= processService.findDataSourceById(sqlParameters.getDatasource());
// data source is null
if (dataSource == null){
logger.error("datasource not exists");
exitStatusCode = -1;
return;
}
logger.info("datasource name : {} , type : {} , desc : {} , user_id : {} , parameter : {}",
dataSource.getName(),
dataSource.getType(),
dataSource.getNote(),
dataSource.getUserId(),
dataSource.getConnectionParams());
List<String> createFuncs = null;
try {
// load class
DataSourceFactory.loadClass(dataSource.getType());
// get datasource
baseDataSource = DataSourceFactory.getDatasource(dataSource.getType(),
dataSource.getConnectionParams());
// ready to execute SQL and parameter entity Map
SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql());
List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements())
.orElse(new ArrayList<>())
.stream()
.map(this::getSqlAndSqlParamsMap)
.collect(Collectors.toList());
// determine if it is UDF
boolean udfTypeFlag = EnumUtils.isValidEnum(UdfType.class, sqlParameters.getType())
&& StringUtils.isNotEmpty(sqlParameters.getUdfs());
if(udfTypeFlag){
String[] ids = sqlParameters.getUdfs().split(",");
int[] idsArray = new int[ids.length];
for(int i=0;i<ids.length;i++){
idsArray[i]=Integer.parseInt(ids[i]);
}
// check udf permission
checkUdfPermission(ArrayUtils.toObject(idsArray));
List<UdfFunc> udfFuncList = processService.queryUdfFunListByids(idsArray);
createFuncs = UDFUtils.createFuncs(udfFuncList, taskProps.getTenantCode(), logger);
}
// execute sql task
executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs);
} catch (Exception e) {
logger.error(e.getMessage(), e);
throw e;
}
}
/**
     * build the SQL that is ready to execute together with its parameter map
     * @param sql raw sql containing ${} style parameters
     * @return the SqlBinds holding the placeholder sql and its positional parameters
*/
private SqlBinds getSqlAndSqlParamsMap(String sql) {
Map<Integer,Property> sqlParamsMap = new HashMap<>();
StringBuilder sqlBuilder = new StringBuilder();
// find process instance by task id
Map<String, Property> paramsMap = ParamUtils.convert(taskProps.getUserDefParamsMap(),
taskProps.getDefinedParams(),
sqlParameters.getLocalParametersMap(),
taskProps.getCmdTypeIfComplement(),
taskProps.getScheduleTime());
// spell SQL according to the final user-defined variable
if(paramsMap == null){
sqlBuilder.append(sql);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
if (StringUtils.isNotEmpty(sqlParameters.getTitle())){
String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(),
ParamUtils.convert(paramsMap));
logger.info("SQL title : {}",title);
sqlParameters.setTitle(title);
}
        //replace variable TIME with $[YYYYmmddd...] in sql for history runs and batch complement jobs
sql = ParameterUtils.replaceScheduleTime(sql, taskProps.getScheduleTime(), paramsMap);
// special characters need to be escaped, ${} needs to be escaped
String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*";
setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap);
// replace the ${} of the SQL statement with the Placeholder
String formatSql = sql.replaceAll(rgex,"?");
sqlBuilder.append(formatSql);
        // print replaced sql
printReplacedSql(sql,formatSql,rgex,sqlParamsMap);
return new SqlBinds(sqlBuilder.toString(), sqlParamsMap);
}
@Override
public AbstractParameters getParameters() {
return this.sqlParameters;
}
/**
* execute function and sql
* @param mainSqlBinds main sql binds
* @param preStatementsBinds pre statements binds
* @param postStatementsBinds post statements binds
* @param createFuncs create functions
*/
public void executeFuncAndSql(SqlBinds mainSqlBinds,
List<SqlBinds> preStatementsBinds,
List<SqlBinds> postStatementsBinds,
List<String> createFuncs){
Connection connection = null;
try {
// if upload resource is HDFS and kerberos startup
CommonUtils.loadKerberosConf();
// if hive , load connection params if exists
if (HIVE == dataSource.getType()) {
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
Map<String, String> connParamMap = CollectionUtils.stringToMap(sqlParameters.getConnParams(),
SEMICOLON,
HIVE_CONF);
paramProp.putAll(connParamMap);
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
paramProp);
}else{
connection = DriverManager.getConnection(baseDataSource.getJdbcUrl(),
baseDataSource.getUser(),
baseDataSource.getPassword());
}
// create temp function
if (CollectionUtils.isNotEmpty(createFuncs)) {
try (Statement funcStmt = connection.createStatement()) {
for (String createFunc : createFuncs) {
logger.info("hive create function sql: {}", createFunc);
funcStmt.execute(createFunc);
}
}
}
for (SqlBinds sqlBind: preStatementsBinds) {
try (PreparedStatement stmt = prepareStatementAndBind(connection, sqlBind)) {
int result = stmt.executeUpdate();
logger.info("pre statement execute result: {}, for sql: {}",result,sqlBind.getSql());
}
}
try (PreparedStatement stmt = prepareStatementAndBind(connection, mainSqlBinds);
ResultSet resultSet = stmt.executeQuery()) {
// decide whether to executeQuery or executeUpdate based on sqlType
if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) {
// query statements need to be convert to JsonArray and inserted into Alert to send
JSONArray resultJSONArray = new JSONArray();
ResultSetMetaData md = resultSet.getMetaData();
int num = md.getColumnCount();
while (resultSet.next()) {
JSONObject mapOfColValues = new JSONObject(true);
for (int i = 1; i <= num; i++) {
mapOfColValues.put(md.getColumnName(i), resultSet.getObject(i));
}
resultJSONArray.add(mapOfColValues);
}
logger.debug("execute sql : {}", JSON.toJSONString(resultJSONArray, SerializerFeature.WriteMapNullValue));
// if there is a result set
if ( !resultJSONArray.isEmpty() ) {
if (StringUtils.isNotEmpty(sqlParameters.getTitle())) {
sendAttachment(sqlParameters.getTitle(),
JSON.toJSONString(resultJSONArray, SerializerFeature.WriteMapNullValue));
}else{
sendAttachment(taskProps.getNodeName() + " query resultsets ",
JSON.toJSONString(resultJSONArray, SerializerFeature.WriteMapNullValue));
}
}
exitStatusCode = 0;
} else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) {
// non query statement
stmt.executeUpdate();
exitStatusCode = 0;
}
}
for (SqlBinds sqlBind: postStatementsBinds) {
try (PreparedStatement stmt = prepareStatementAndBind(connection, sqlBind)) {
int result = stmt.executeUpdate();
logger.info("post statement execute result: {},for sql: {}",result,sqlBind.getSql());
}
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage());
} finally {
ConnectionUtils.releaseResource(connection);
}
}
/**
     * create a PreparedStatement and bind its positional parameters
     * @param connection jdbc connection
     * @param sqlBinds sql with its positional parameters
     * @return the prepared statement with the query timeout applied and all parameters bound
     * @throws Exception if a parameter cannot be bound
*/
private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception {
// is the timeout set
boolean timeoutFlag = taskProps.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED ||
taskProps.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED;
// prepare statement
PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql());
if(timeoutFlag){
stmt.setQueryTimeout(taskProps.getTaskTimeout());
}
Map<Integer, Property> params = sqlBinds.getParamsMap();
if(params != null) {
for (Map.Entry<Integer, Property> entry : params.entrySet()) {
Property prop = entry.getValue();
ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue());
}
}
logger.info("prepare statement replace sql : {} ", stmt);
return stmt;
}
/**
* send mail as an attachment
* @param title title
* @param content content
*/
public void sendAttachment(String title,String content){
// process instance
ProcessInstance instance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
List<User> users = alertDao.queryUserByAlertGroupId(instance.getWarningGroupId());
// receiving group list
List<String> receviersList = new ArrayList<>();
for(User user:users){
receviersList.add(user.getEmail().trim());
}
// custom receiver
String receivers = sqlParameters.getReceivers();
if (StringUtils.isNotEmpty(receivers)){
String[] splits = receivers.split(COMMA);
for (String receiver : splits){
receviersList.add(receiver.trim());
}
}
// copy list
List<String> receviersCcList = new ArrayList<>();
// Custom Copier
String receiversCc = sqlParameters.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)){
String[] splits = receiversCc.split(COMMA);
for (String receiverCc : splits){
receviersCcList.add(receiverCc.trim());
}
}
String showTypeName = sqlParameters.getShowType().replace(COMMA,"").trim();
if(EnumUtils.isValidEnum(ShowType.class,showTypeName)){
Map<String, Object> mailResult = MailUtils.sendMails(receviersList,
receviersCcList, title, content, ShowType.valueOf(showTypeName));
if(!(boolean) mailResult.get(STATUS)){
throw new RuntimeException("send mail failed!");
}
}else{
logger.error("showType: {} is not valid " ,showTypeName);
throw new RuntimeException(String.format("showType: %s is not valid ",showTypeName));
}
}
/**
* regular expressions match the contents between two specified strings
* @param content content
* @param rgex rgex
* @param sqlParamsMap sql params map
* @param paramsPropsMap params props map
*/
public void setSqlParamsMap(String content, String rgex, Map<Integer,Property> sqlParamsMap, Map<String,Property> paramsPropsMap){
Pattern pattern = Pattern.compile(rgex);
Matcher m = pattern.matcher(content);
int index = 1;
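        // JDBC positional parameters are 1-based, so the placeholder index starts at 1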
while (m.find()) {
String paramName = m.group(1);
Property prop = paramsPropsMap.get(paramName);
sqlParamsMap.put(index,prop);
index ++;
}
}
/**
* print replace sql
* @param content content
* @param formatSql format sql
* @param rgex rgex
* @param sqlParamsMap sql params map
*/
public void printReplacedSql(String content, String formatSql,String rgex, Map<Integer,Property> sqlParamsMap){
//parameter print style
logger.info("after replace sql , preparing : {}" , formatSql);
StringBuilder logPrint = new StringBuilder("replaced sql , parameters:");
for(int i=1;i<=sqlParamsMap.size();i++){
logPrint.append(sqlParamsMap.get(i).getValue()+"("+sqlParamsMap.get(i).getType()+")");
}
logger.info("Sql Params are {}", logPrint);
}
/**
* check udf function permission
* @param udfFunIds udf functions
* @return if has download permission return true else false
*/
private void checkUdfPermission(Integer[] udfFunIds) throws Exception{
// process instance
ProcessInstance processInstance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
int userId = processInstance.getExecutorId();
PermissionCheck<Integer> permissionCheckUdf = new PermissionCheck<>(AuthorizationType.UDF, processService,udfFunIds,userId,logger);
permissionCheckUdf.checkPermission();
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem, and it is a bug. I have two workflows, A and B. Workflow A contains a task node a1, and in workflow B I added a dependent-task node b1 whose dependency is a1 with the date range set to "today". Workflow A's a1 had only run successfully the day before; when I then ran workflow B, in theory it should have failed, but it actually succeeded. Similarly, when I choose "yesterday" for b1 it also runs successfully, but when I choose "the last three days" it fails, and the status shown is failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | dolphinscheduler-dao/src/main/resources/application.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# base spring data source configuration
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
# postgre
spring.datasource.driver-class-name=org.postgresql.Driver
spring.datasource.url=jdbc:postgresql://localhost:5432/dolphinscheduler
# mysql
#spring.datasource.driver-class-name=com.mysql.jdbc.Driver
#spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
# h2
#spring.datasource.driver-class-name=org.h2.Driver
#spring.datasource.url=jdbc:h2:file:../sql/h2;AUTO_SERVER=TRUE
spring.datasource.username=test
spring.datasource.password=test
# connection configuration
spring.datasource.initialSize=5
# min connection number
spring.datasource.minIdle=5
# max connection number
spring.datasource.maxActive=50
# max wait time for get a connection in milliseconds. if configuring maxWait, fair locks are enabled by default and concurrency efficiency decreases.
# If necessary, unfair locks can be used by configuring the useUnfairLock attribute to true.
spring.datasource.maxWait=60000
# milliseconds for check to close free connections
spring.datasource.timeBetweenEvictionRunsMillis=60000
# interval, in milliseconds, at which the destroy thread checks connections and closes a physical connection whose idle time is greater than or equal to minEvictableIdleTimeMillis
spring.datasource.timeBetweenConnectErrorMillis=60000
# the longest time a connection remains idle without being evicted, in milliseconds
spring.datasource.minEvictableIdleTimeMillis=300000
# the SQL used to check whether a connection is valid; it must be a query statement. If validationQuery is null, testOnBorrow, testOnReturn, and testWhileIdle will not work.
spring.datasource.validationQuery=SELECT 1
# timeout, in seconds, for the connection-validity check
spring.datasource.validationQueryTimeout=3
# when borrowing a connection, if it has been idle for longer than timeBetweenEvictionRunsMillis,
# validationQuery is executed to check whether the connection is still valid
spring.datasource.testWhileIdle=true
#execute validation to check if the connection is valid when applying for a connection
spring.datasource.testOnBorrow=true
#execute validation to check if the connection is valid when the connection is returned
spring.datasource.testOnReturn=false
spring.datasource.defaultAutoCommit=true
spring.datasource.keepAlive=true
# open PSCache, specify count PSCache for every connection
spring.datasource.poolPreparedStatements=true
spring.datasource.maxPoolPreparedStatementPerConnectionSize=20
spring.datasource.filters=stat,wall,log4j
spring.datasource.connectionProperties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000
#mybatis
mybatis-plus.mapper-locations=classpath*:/org.apache.dolphinscheduler.dao.mapper/*.xml
mybatis-plus.typeEnumsPackage=org.apache.dolphinscheduler.*.enums
#Entity scan, where multiple packages are separated by a comma or semicolon
mybatis-plus.typeAliasesPackage=org.apache.dolphinscheduler.dao.entity
# Primary key type. AUTO: "database auto-increment ID", INPUT: "user input ID", ID_WORKER: "globally unique ID (numeric type)", UUID: "globally unique UUID"
mybatis-plus.global-config.db-config.id-type=AUTO
# Field strategy. IGNORED: "ignore the check", NOT_NULL: "not-null check", NOT_EMPTY: "not-empty check"
mybatis-plus.global-config.db-config.field-strategy=NOT_NULL
# convert camel case to underscore
mybatis-plus.global-config.db-config.column-underline=true
mybatis-plus.global-config.db-config.logic-delete-value=-1
mybatis-plus.global-config.db-config.logic-not-delete-value=0
mybatis-plus.global-config.db-config.banner=false
#The original configuration
mybatis-plus.configuration.map-underscore-to-camel-case=true
mybatis-plus.configuration.cache-enabled=false
mybatis-plus.configuration.call-setters-on-nulls=true
mybatis-plus.configuration.jdbc-type-for-null=null
# master settings
# master execute thread num
master.exec.threads=100
# master execute task number in parallel
master.exec.task.num=20
# master heartbeat interval
master.heartbeat.interval=10
# master commit task retry times
master.task.commit.retryTimes=5
# master commit task interval
master.task.commit.interval=1000
# the master server can only work when the cpu avg load is less than this value. default value : the number of cpu cores * 2
master.max.cpuload.avg=100
# the master server can only work when the available memory is larger than this reserved memory. default value : physical memory * 1/10, unit is G.
master.reserved.memory=0.1
# worker settings
# worker execute thread num
worker.exec.threads=100
# worker heartbeat interval
worker.heartbeat.interval=10
# the number of tasks the worker fetches at a time
worker.fetch.task.num=3
# the worker server can only work when the cpu avg load is less than this value. default value : the number of cpu cores * 2
worker.max.cpuload.avg=100
# the worker server can only work when the available memory is larger than this reserved memory. default value : physical memory * 1/6, unit is G.
worker.reserved.memory=0.1
# data quality analysis is not currently in use. please ignore the following configuration
# task record
task.record.flag=false
task.record.datasource.url=jdbc:mysql://192.168.xx.xx:3306/etl?characterEncoding=UTF-8
task.record.datasource.username=xx
task.record.datasource.password=xx
# Logger Config
#logging.level.org.apache.dolphinscheduler.dao=debug
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem which I think is a bug. I have two workflows, A and B. Workflow A contains a task node a1, and in workflow B I create a new dependent task node b1 whose dependency is a1 with the date type 'today'. My situation is this: the day before, I created workflow A and ran a1 through a complete process, and then I ran workflow B. In theory it should fail, because a1 has not run today, but in fact it succeeded. Similarly, if I choose the 'yesterday' type for b1 it can also run successfully, but if I choose the 'last three days' type the run fails; that flag is failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentExecute.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.dependent;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.DependentItem;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
* dependent item execute
*/
public class DependentExecute {
/**
* process service
*/
private final ProcessService processService = SpringApplicationContext.getBean(ProcessService.class);
/**
* depend item list
*/
private List<DependentItem> dependItemList;
/**
* dependent relation
*/
private DependentRelation relation;
/**
* depend result
*/
private DependResult modelDependResult = DependResult.WAITING;
/**
* depend result map
*/
private Map<String, DependResult> dependResultMap = new HashMap<>();
/**
* logger
*/
private Logger logger = LoggerFactory.getLogger(DependentExecute.class);
/**
* constructor
* @param itemList item list
* @param relation relation
*/
public DependentExecute(List<DependentItem> itemList, DependentRelation relation){
this.dependItemList = itemList;
this.relation = relation;
}
/**
     * get dependent result for one dependent item
* @param dependentItem dependent item
* @param currentTime current time
* @return DependResult
*/
public DependResult getDependentResultForItem(DependentItem dependentItem, Date currentTime){
List<DateInterval> dateIntervals = DependentUtils.getDateIntervalList(currentTime, dependentItem.getDateValue());
return calculateResultForTasks(dependentItem, dateIntervals );
}
/**
* calculate dependent result for one dependent item.
* @param dependentItem dependent item
* @param dateIntervals date intervals
     * @return DependResult
*/
private DependResult calculateResultForTasks(DependentItem dependentItem,
List<DateInterval> dateIntervals) {
DependResult result = DependResult.FAILED;
for(DateInterval dateInterval : dateIntervals){
ProcessInstance processInstance = findLastProcessInterval(dependentItem.getDefinitionId(),
dateInterval);
if(processInstance == null){
logger.error("cannot find the right process instance: definition id:{}, start:{}, end:{}",
dependentItem.getDefinitionId(), dateInterval.getStartTime(), dateInterval.getEndTime() );
return DependResult.FAILED;
}
if(dependentItem.getDepTasks().equals(Constants.DEPENDENT_ALL)){
result = getDependResultByState(processInstance.getState());
}else{
TaskInstance taskInstance = null;
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance task : taskInstanceList){
if(task.getName().equals(dependentItem.getDepTasks())){
taskInstance = task;
break;
}
}
if(taskInstance == null){
// cannot find task in the process instance
// maybe because process instance is running or failed.
result = getDependResultByState(processInstance.getState());
}else{
result = getDependResultByState(taskInstance.getState());
}
}
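            // every date interval must yield SUCCESS; stop at the first non-success result and return it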
if(result != DependResult.SUCCESS){
break;
}
}
return result;
}
/**
     * find the last process instance that matches one of the following:
     * 1. a manual run that finished within the interval
     * 2. a scheduled run whose schedule time falls within the interval
* @param definitionId definition id
* @param dateInterval date interval
* @return ProcessInstance
*/
private ProcessInstance findLastProcessInterval(int definitionId, DateInterval dateInterval) {
ProcessInstance runningProcess = processService.findLastRunningProcess(definitionId, dateInterval);
if(runningProcess != null){
return runningProcess;
}
ProcessInstance lastSchedulerProcess = processService.findLastSchedulerProcessInterval(
definitionId, dateInterval
);
ProcessInstance lastManualProcess = processService.findLastManualProcessInterval(
definitionId, dateInterval
);
if(lastManualProcess ==null){
return lastSchedulerProcess;
}
if(lastSchedulerProcess == null){
return lastManualProcess;
}
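        // both a manual run and a scheduled run exist in the interval: prefer the one that finished later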
return (lastManualProcess.getEndTime().after(lastSchedulerProcess.getEndTime()))?
lastManualProcess : lastSchedulerProcess;
}
/**
* get dependent result by task/process instance state
* @param state state
* @return DependResult
*/
private DependResult getDependResultByState(ExecutionStatus state) {
if(state.typeIsRunning() || state == ExecutionStatus.SUBMITTED_SUCCESS || state == ExecutionStatus.WAITTING_THREAD){
return DependResult.WAITING;
}else if(state.typeIsSuccess()){
return DependResult.SUCCESS;
}else{
return DependResult.FAILED;
}
}
/**
     * judge whether the depend item has finished
* @param currentTime current time
* @return boolean
*/
public boolean finish(Date currentTime){
if(modelDependResult == DependResult.WAITING){
modelDependResult = getModelDependResult(currentTime);
return false;
}
return true;
}
/**
* get model depend result
* @param currentTime current time
* @return DependResult
*/
public DependResult getModelDependResult(Date currentTime){
List<DependResult> dependResultList = new ArrayList<>();
for(DependentItem dependentItem : dependItemList){
DependResult dependResult = getDependResultForItem(dependentItem, currentTime);
if(dependResult != DependResult.WAITING){
dependResultMap.put(dependentItem.getKey(), dependResult);
}
dependResultList.add(dependResult);
}
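        // combine the per-item results using this model's AND/OR relation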
modelDependResult = DependentUtils.getDependResultForRelation(
this.relation, dependResultList
);
return modelDependResult;
}
/**
* get dependent item result
* @param item item
* @param currentTime current time
* @return DependResult
*/
public DependResult getDependResultForItem(DependentItem item, Date currentTime){
String key = item.getKey();
if(dependResultMap.containsKey(key)){
return dependResultMap.get(key);
}
return getDependentResultForItem(item, currentTime);
}
public Map<String, DependResult> getDependResultMap(){
return dependResultMap;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem which I think is a bug. I have two workflows, A and B. Workflow A contains a task node a1, and in workflow B I create a new dependent task node b1 whose dependency is a1 with the date type 'today'. My situation is this: the day before, I created workflow A and ran a1 through a complete process, and then I ran workflow B. In theory it should fail, because a1 has not run today, but in fact it succeeded. Similarly, if I choose the 'yesterday' type for b1 it can also run successfully, but if I choose the 'last three days' type the run fails; that flag is failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentTask.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.dependent;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.dependent.DependentParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.DependentUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.worker.task.AbstractTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import java.util.*;
import static org.apache.dolphinscheduler.common.Constants.DEPENDENT_SPLIT;
/**
* Dependent Task
*/
public class DependentTask extends AbstractTask {
/**
* dependent task list
*/
private List<DependentExecute> dependentTaskList = new ArrayList<>();
/**
* depend item result map
* save the result to log file
*/
private Map<String, DependResult> dependResultMap = new HashMap<>();
/**
* dependent parameters
*/
private DependentParameters dependentParameters;
/**
* dependent date
*/
private Date dependentDate;
/**
* process service
*/
private ProcessService processService;
/**
* constructor
* @param props props
* @param logger logger
*/
public DependentTask(TaskProps props, Logger logger) {
super(props, logger);
}
@Override
public void init(){
logger.info("dependent task initialize");
this.dependentParameters = JSONUtils.parseObject(this.taskProps.getDependence(),
DependentParameters.class);
for(DependentTaskModel taskModel : dependentParameters.getDependTaskList()){
this.dependentTaskList.add(new DependentExecute(
taskModel.getDependItemList(), taskModel.getRelation()));
}
this.processService = SpringApplicationContext.getBean(ProcessService.class);
if(taskProps.getScheduleTime() != null){
this.dependentDate = taskProps.getScheduleTime();
}else{
this.dependentDate = taskProps.getTaskStartTime();
}
}
@Override
public void handle() throws Exception {
// set the name of the current thread
String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskProps.getTaskAppId());
Thread.currentThread().setName(threadLoggerInfoName);
try{
TaskInstance taskInstance = null;
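            // poll the task state until it is killed/cancelled or all dependent items have finished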
while(Stopper.isRunning()){
taskInstance = processService.findTaskInstanceById(this.taskProps.getTaskInstId());
if(taskInstance == null){
exitStatusCode = -1;
break;
}
if(taskInstance.getState() == ExecutionStatus.KILL){
this.cancel = true;
}
if(this.cancel || allDependentTaskFinish()){
break;
}
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
}
if(cancel){
exitStatusCode = Constants.EXIT_CODE_KILL;
}else{
DependResult result = getTaskDependResult();
exitStatusCode = (result == DependResult.SUCCESS) ?
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE;
}
}catch (Exception e){
logger.error(e.getMessage(),e);
exitStatusCode = -1;
throw e;
}
}
/**
* get dependent result
* @return DependResult
*/
private DependResult getTaskDependResult(){
List<DependResult> dependResultList = new ArrayList<>();
for(DependentExecute dependentExecute : dependentTaskList){
DependResult dependResult = dependentExecute.getModelDependResult(dependentDate);
dependResultList.add(dependResult);
}
DependResult result = DependentUtils.getDependResultForRelation(
this.dependentParameters.getRelation(), dependResultList
);
return result;
}
/**
* judge all dependent tasks finish
* @return whether all dependent tasks finish
*/
private boolean allDependentTaskFinish(){
boolean finish = true;
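        // the dependent check is finished only when every DependentExecute model has produced a final result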
for(DependentExecute dependentExecute : dependentTaskList){
for(Map.Entry<String, DependResult> entry: dependentExecute.getDependResultMap().entrySet()) {
if(!dependResultMap.containsKey(entry.getKey())){
dependResultMap.put(entry.getKey(), entry.getValue());
//save depend result to log
logger.info("dependent item complete {} {},{}",
DEPENDENT_SPLIT, entry.getKey(), entry.getValue().toString());
}
}
if(!dependentExecute.finish(dependentDate)){
finish = false;
}
}
return finish;
}
@Override
public void cancelApplication(boolean cancelApplication) throws Exception {
// cancel process
this.cancel = true;
}
@Override
public AbstractParameters getParameters() {
return null;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem which I think is a bug. I have two workflows, A and B. Workflow A contains a task node a1, and in workflow B I create a new dependent task node b1 whose dependency is a1 with the date type 'today'. My situation is this: the day before, I created workflow A and ran a1 through a complete process, and then I ran workflow B. In theory it should fail, because a1 has not run today, but in fact it succeeded. Similarly, if I choose the 'yesterday' type for b1 it can also run successfully, but if I choose the 'last three days' type the run fails; that flag is failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/dependent/DependentTaskTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker.task.dependent;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DependentTaskTest {
private static final Logger logger = LoggerFactory.getLogger(DependentTaskTest.class);
@Test
public void testDependInit() throws Exception{
TaskProps taskProps = new TaskProps();
String dependString = "{\n" +
"\"dependTaskList\":[\n" +
" {\n" +
" \"dependItemList\":[\n" +
" {\n" +
" \"definitionId\": 101,\n" +
" \"depTasks\": \"ALL\",\n" +
" \"cycle\": \"day\",\n" +
" \"dateValue\": \"last1Day\"\n" +
" }\n" +
" ],\n" +
" \"relation\": \"AND\"\n" +
" }\n" +
" ],\n" +
"\"relation\":\"OR\"\n" +
"}";
taskProps.setTaskInstId(252612);
taskProps.setDependence(dependString);
DependentTask dependentTask = new DependentTask(taskProps, logger);
dependentTask.init();
dependentTask.handle();
Assert.assertEquals(dependentTask.getExitStatusCode(), Constants.EXIT_CODE_FAILURE );
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem which I think is a bug. I have two workflows, A and B. Workflow A contains a task node a1, and in workflow B I create a new dependent task node b1 whose dependency is a1 with the date type 'today'. My situation is this: the day before, I created workflow A and ran a1 through a complete process, and then I ran workflow B. In theory it should fail, because a1 has not run today, but in fact it succeeded. Similarly, if I choose the 'yesterday' type for b1 it can also run successfully, but if I choose the 'last three days' type the run fails; that flag is failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.cronutils.model.Cron;
import org.apache.commons.lang.ArrayUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.ITaskQueue;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import java.util.*;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.toSet;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* process relative dao that some mappers in this.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private WorkerGroupMapper workerGroupMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
/**
* task queue impl
*/
@Autowired
private ITaskQueue taskQueue;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
//cannot construct process instance, return null;
if(processInstance == null){
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if(!checkThreadNum(command, validThreadNum)){
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
delCommandByid(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
delCommandByid(command.getId());
}
/**
* set process waiting thread
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITTING_THREAD);
if(command.getCommandType() != CommandType.RECOVER_WAITTING_THREAD){
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionId());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null){
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
* @return command
*/
public Command findOneCommand(){
return commandMapper.getOneToRun();
}
/**
* check the input command exists in queue list
* @param command command
* @return create command result
*/
public Boolean verifyIsNeedCreateCommand(Command command){
Boolean isNeedCreate = true;
Map<CommandType,Integer> cmdTypeMap = new HashMap<CommandType,Integer>();
cmdTypeMap.put(CommandType.REPEAT_RUNNING,1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS,1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS,1);
CommandType commandType = command.getCommandType();
if(cmdTypeMap.containsKey(commandType)){
JSONObject cmdParamObj = (JSONObject) JSON.parse(command.getCommandParam());
JSONObject tempObj;
int processInstanceId = cmdParamObj.getInteger(CMDPARAM_RECOVER_PROCESS_ID_STRING);
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand:commands){
if(cmdTypeMap.containsKey(tmpCommand.getCommandType())){
tempObj = (JSONObject) JSON.parse(tmpCommand.getCommandParam());
if(tempObj != null && processInstanceId == tempObj.getInteger(CMDPARAM_RECOVER_PROCESS_ID_STRING)){
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId){
return processInstanceMapper.queryDetailById(processId);
}
/**
* find process instance by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId){
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* delete work process instance by id
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId){
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId){
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for(Integer subId : subProcessIdList ){
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* calculate sub process number in the process define.
* @param processDefinitionId processDefinitionId
* @return process thread num count
*/
private Integer workProcessThreadNumCount(Integer processDefinitionId){
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinitionId, ids);
return ids.size()+1;
}
/**
* recursive query sub process definition id by parent id.
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids){
ProcessDefinition processDefinition = processDefineMapper.selectById(parentId);
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
if (taskNodeList != null && taskNodeList.size() > 0){
for (TaskNode taskNode : taskNodeList){
String parameter = taskNode.getParams();
if (parameter.contains(CMDPARAM_SUB_PROCESS_DEFINE_ID)){
SubProcessParameters subProcessParam = JSON.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(),ids);
}
}
}
}
/**
     * create a recovery waiting thread command when the thread pool is not enough for the process instance.
     * a sub work process instance does not need a recovery command.
     * create the recovery waiting thread command and delete the origin command at the same time.
     * if the recovery command already exists, only update the update_time field
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // sub process does not need to create a waiting thread command
if(processInstance.getIsSubProcess() == Flag.YES){
if(originCommand != null){
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD, String.valueOf(processInstance.getId()));
// process instance quit by "waiting thread" state
if(originCommand == null){
Command command = new Command(
CommandType.RECOVER_WAITTING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinitionId(),
JSONUtils.toJson(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getProcessInstancePriority()
);
saveCommand(command);
return ;
}
        // update the command time if the current command is recovered from waiting thread
if(originCommand.getCommandType() == CommandType.RECOVER_WAITTING_THREAD){
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
}else{
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITTING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJson(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam){
Date scheduleTime = command.getScheduleTime();
if(scheduleTime == null){
if(cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)){
scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam){
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if(scheduleTime != null){
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
processInstance.setConnects(processDefinition.getConnects());
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
//copy process define json to process instance
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
int workerGroupId = command.getWorkerGroupId() == 0 ? -1 : command.getWorkerGroupId();
processInstance.setWorkerGroupId(workerGroupId);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
/**
     * get the tenant for the process.
     * if there is a tenant id in the definition, use the tenant of the definition.
     * if there is no tenant id in the definition, or the tenant does not exist,
     * use the definition creator's tenant.
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId){
Tenant tenant = null;
if(tenantId >= 0){
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0){
return null;
}
if(tenant == null){
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check command parameters is valid
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam){
if(command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType()== TaskDependType.TASK_PRE){
if(cmdParam == null
|| !cmdParam.containsKey(Constants.CMDPARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMDPARAM_START_NODE_NAMES).isEmpty()){
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
/**
* construct process instance according to one command.
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host){
ProcessInstance processInstance = null;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = null;
if(command.getProcessDefinitionId() != 0){
processDefinition = processDefineMapper.selectById(command.getProcessDefinitionId());
if(processDefinition == null){
logger.error("cannot find the work process define! define id : {}", command.getProcessDefinitionId());
return null;
}
}
if(cmdParam != null ){
Integer processInstanceId = 0;
// recover from failure or pause tasks
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
}else if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
// sub process map
String pId = cmdParam.get(Constants.CMDPARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
}else if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD)){
// waiting thread command
String pId = cmdParam.get(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
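            // no recover / sub-process / waiting-thread instance id supplied: build a brand new instance, otherwise reload the existing one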
if(processInstanceId ==0){
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}else{
processInstance = this.findProcessInstanceDetailById(processInstanceId);
}
processDefinition = processDefineMapper.selectById(processInstance.getProcessDefinitionId());
processInstance.setProcessDefinition(processDefinition);
//reset command parameter
if(processInstance.getCommandParam() != null){
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for(Map.Entry<String, String> entry: processCmdParam.entrySet()) {
if(!cmdParam.containsKey(entry.getKey())){
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
processInstance.setCommandParam(command.getCommandParam());
}
}else{
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if(!checkCmdParam(command, cmdParam)){
logger.error("command parameter check failed!");
return null;
}
if(command.getScheduleTime() != null){
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXEUTION;
int runTime = processInstance.getRunTimes();
switch (commandType){
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for(Integer taskId : failedList){
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
processInstance.setRunTimes(runTime +1 );
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITTING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for(Integer taskId : suspendedNodeList){
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
processInstance.setRunTimes(runTime +1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : validTaskList){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime +1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* return complement data if the process start with complement data
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command){
if(CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()){
return CommandType.COMPLEMENT_DATA;
}else{
return command.getCommandType();
}
}
/**
* initialize complement data parameters
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if(!processInstance.isComplementData()){
return;
}
Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
YYYY_MM_DD_HH_MM_SS);
processInstance.setScheduleTime(startComplementTime);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters.
* handle sub work process instance, update relation table and command parameters
* set sub work process flag, extends parent work process command parameters
* @param subProcessInstance subProcessInstance
* @return process instance
*/
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance){
String cmdParam = subProcessInstance.getCommandParam();
if(StringUtils.isEmpty(cmdParam)){
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if(paramMap.containsKey(CMDPARAM_SUB_PROCESS)
&& CMDPARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMDPARAM_SUB_PROCESS))){
paramMap.remove(CMDPARAM_SUB_PROCESS);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJson(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user def params to sub process..
String parentInstanceId = paramMap.get(CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if(StringUtils.isNotEmpty(parentInstanceId)){
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if(parentInstance != null){
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
}else{
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if(processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0){
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return subProcessInstance;
}
/**
* join parent global params into sub process.
     * only the keys that are not already in the sub process global params will be joined.
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams){
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String,String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for(Property parent : parentPropertyList){
if(!subMap.containsKey(parent.getProp())){
subPropertyList.add(parent);
}
}
return JSONUtils.toJson(subPropertyList);
}
/**
* initialize task instance
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance){
if(!taskInstance.isSubProcess()){
if(taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* submit task to db
* submit sub process to command
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance, ProcessInstance processInstance){
logger.info("start submit task : {}, instance id:{}, state: {}, ",
taskInstance.getName(), processInstance.getId(), processInstance.getState() );
processInstance = this.findProcessInstanceDetailById(processInstance.getId());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if(task == null){
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
return task;
}
if(!task.getState().typeIsFinished()){
createSubWorkProcessCommand(processInstance, task);
}
logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",
taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask){
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if(processMap != null){
return processMap;
}else if(parentInstance.getCommandType() == CommandType.REPEAT_RUNNING
|| parentInstance.isComplementData()){
// update current task id to map
// repeat running does not generate new sub process instance
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if(processMap!= null){
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for(TaskInstance task : preTaskList){
if(task.getName().equals(parentTask.getName())){
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if(map!=null){
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
private void createSubWorkProcessCommand(ProcessInstance parentProcessInstance,
TaskInstance task){
if(!task.isSubProcess()){
return;
}
ProcessInstanceMap instanceMap = setProcessInstanceMap(parentProcessInstance, task);
TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
Map<String, String> subProcessParam = JSONUtils.toMap(taskNode.getParams());
Integer childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID));
ProcessInstance childInstance = findSubProcessInstance(parentProcessInstance.getId(), task.getId());
CommandType fatherType = parentProcessInstance.getCommandType();
CommandType commandType = fatherType;
if(childInstance == null || commandType == CommandType.REPEAT_RUNNING){
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
            // the sub process must begin with a scheduler/complement data command type
            // if the father process began with scheduler/complement data, reuse that command type
if(fatherHistoryCommand.startsWith(CommandType.SCHEDULER.toString()) ||
fatherHistoryCommand.startsWith(CommandType.COMPLEMENT_DATA.toString())){
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
}
if(childInstance != null){
childInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateProcessInstance(childInstance);
}
// set sub work process command
String processMapStr = JSONUtils.toJson(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if(commandType == CommandType.COMPLEMENT_DATA ||
(childInstance != null && childInstance.isComplementData())){
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJson(cmdParam);
}
updateSubProcessDefinitionByParent(parentProcessInstance, childDefineId);
Command command = new Command();
command.setWarningType(parentProcessInstance.getWarningType());
command.setWarningGroupId(parentProcessInstance.getWarningGroupId());
command.setFailureStrategy(parentProcessInstance.getFailureStrategy());
command.setProcessDefinitionId(childDefineId);
command.setScheduleTime(parentProcessInstance.getScheduleTime());
command.setExecutorId(parentProcessInstance.getExecutorId());
command.setCommandParam(processMapStr);
command.setCommandType(commandType);
command.setProcessInstancePriority(parentProcessInstance.getProcessInstancePriority());
createCommand(command);
logger.info("sub process command created: {} ", command.toString());
}
/**
* update sub process definition
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionId childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, int childDefinitionId) {
ProcessDefinition fatherDefinition = this.findProcessDefineById(parentProcessInstance.getProcessDefinitionId());
ProcessDefinition childDefinition = this.findProcessDefineById(childDefinitionId);
if(childDefinition != null && fatherDefinition != null){
childDefinition.setReceivers(fatherDefinition.getReceivers());
childDefinition.setReceiversCc(fatherDefinition.getReceiversCc());
processDefineMapper.updateById(childDefinition);
}
}
/**
     * submit task to db
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance){
ExecutionStatus processInstanceState = processInstance.getState();
if(taskInstance.getState().typeIsFailure()){
if(taskInstance.isSubProcess()){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}else {
if( processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE){
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
                    // create a new task instance
if(taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}
taskInstance.setEndTime(null);
taskInstance.setStartTime(new Date());
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
taskInstance.setSubmitTime(new Date());
boolean saveResult = saveTaskInstance(taskInstance);
if(!saveResult){
return null;
}
return taskInstance;
}
/**
* submit task to queue
* @param taskInstance taskInstance
* @return whether submit task to queue success
*/
public Boolean submitTaskToQueue(TaskInstance taskInstance) {
try{
if(taskInstance.isSubProcess()){
return true;
}
if(taskInstance.getState().typeIsFinished()){
logger.info("submit to task queue, but task [{}] state [{}] is already finished. ", taskInstance.getName(), taskInstance.getState());
return true;
}
// task cannot submit when running
if(taskInstance.getState() == ExecutionStatus.RUNNING_EXEUTION){
logger.info("submit to task queue, but task [{}] state already be running. ", taskInstance.getName());
return true;
}
if(checkTaskExistsInTaskQueue(taskInstance)){
logger.info("submit to task queue, but task [{}] already exists in the queue.", taskInstance.getName());
return true;
}
logger.info("task ready to queue: {}" , taskInstance);
boolean insertQueueResult = taskQueue.add(DOLPHINSCHEDULER_TASKS_QUEUE, taskZkInfo(taskInstance));
logger.info("master insert into queue success, task : {}", taskInstance.getName());
return insertQueueResult;
}catch (Exception e){
logger.error("submit task to queue Exception: ", e);
logger.error("task queue error : {}", JSONUtils.toJson(taskInstance));
return false;
}
}
/**
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskInstanceId}_${task executed by ip1},${ip2}...
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
* @param taskInstance taskInstance
* @return task zk queue str
*/
public String taskZkInfo(TaskInstance taskInstance) {
int taskWorkerGroupId = getTaskWorkerGroupId(taskInstance);
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if(processInstance == null){
logger.error("process instance is null. please check the task info, task id: " + taskInstance.getId());
return "";
}
StringBuilder sb = new StringBuilder(100);
sb.append(processInstance.getProcessInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getProcessInstanceId()).append(Constants.UNDERLINE)
.append(taskInstance.getTaskInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getId()).append(Constants.UNDERLINE);
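// illustrative example (made-up values, assuming MEDIUM priority maps to ordinal 2):
// a MEDIUM-priority process instance 10 holding MEDIUM-priority task instance 100 yields the
// prefix "2_10_2_100_"; the section appended below is either the long-encoded worker ip list
// or Constants.DEFAULT_WORKER_ID when no usable worker group is found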
if(taskWorkerGroupId > 0){
//not to find data from db
WorkerGroup workerGroup = queryWorkerGroupById(taskWorkerGroupId);
if(workerGroup == null ){
logger.info("task {} cannot find the worker group, use all worker instead.", taskInstance.getId());
sb.append(Constants.DEFAULT_WORKER_ID);
return sb.toString();
}
String ips = workerGroup.getIpList();
if(StringUtils.isBlank(ips)){
logger.error("task:{} worker group:{} parameters(ip_list) is null, this task would be running on all workers",
taskInstance.getId(), workerGroup.getId());
sb.append(Constants.DEFAULT_WORKER_ID);
return sb.toString();
}
StringBuilder ipSb = new StringBuilder(100);
String[] ipArray = ips.split(COMMA);
for (String ip : ipArray) {
long ipLong = IpUtils.ipToLong(ip);
ipSb.append(ipLong).append(COMMA);
}
if(ipSb.length() > 0) {
ipSb.deleteCharAt(ipSb.length() - 1);
}
sb.append(ipSb);
}else{
sb.append(Constants.DEFAULT_WORKER_ID);
}
return sb.toString();
}
/**
* get submit task instance state by the work process state
* cannot modify the task state when it is running, killed or already submitted successfully,
* or when this task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState){
ExecutionStatus state = taskInstance.getState();
if(
// running or killed
// the task already exists in task queue
// return state
state == ExecutionStatus.RUNNING_EXEUTION
|| state == ExecutionStatus.KILL
|| checkTaskExistsInTaskQueue(taskInstance)
){
return state;
}
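// illustrative example: a task that is not yet running whose parent process instance has just been
// set to READY_PAUSE is returned as PAUSE here, so it is never submitted to the task queue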
// return pause/stop if the process instance state is ready pause/stop,
// otherwise return submit success
if( processInstanceState == ExecutionStatus.READY_PAUSE){
state = ExecutionStatus.PAUSE;
}else if(processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
}else{
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
/**
* check process instance strategy
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance){
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if(failureStrategy == FailureStrategy.CONTINUE){
return true;
}
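// for other strategies (e.g. FailureStrategy.END) any sibling task that has already failed means
// this task must not run, so the caller turns its submit state into KILL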
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for(TaskInstance task : taskInstances){
if(task.getState() == ExecutionStatus.FAILURE){
return false;
}
}
return true;
}
/**
* check whether the task instance already exists in the task queue
* @param taskInstance taskInstance
* @return whether the task instance exists in the queue
*/
public boolean checkTaskExistsInTaskQueue(TaskInstance taskInstance){
if(taskInstance.isSubProcess()){
return false;
}
String taskZkInfo = taskZkInfo(taskInstance);
return taskQueue.checkTaskExists(DOLPHINSCHEDULER_TASKS_QUEUE, taskZkInfo);
}
/**
* create a new process instance
* @param processInstance processInstance
*/
public void createProcessInstance(ProcessInstance processInstance){
if (processInstance != null){
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update the work process instance into the database
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance){
if (processInstance == null){
logger.error("save error, process instance is null!");
return ;
}
if(processInstance.getId() != 0){
processInstanceMapper.updateById(processInstance);
}else{
createProcessInstance(processInstance);
}
}
/**
* insert or update command
* @param command command
* @return save command result
*/
public int saveCommand(Command command){
if(command.getId() != 0){
return commandMapper.updateById(command);
}else{
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance){
if(taskInstance.getId() != 0){
return updateTaskInstance(taskInstance);
}else{
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance){
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* delete a command by id
* @param id id
*/
public void delCommandByid(int id) {
commandMapper.deleteById(id);
}
/**
* find task instance by id
* @param taskId task id
* @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId){
return taskInstanceMapper.selectById(taskId);
}
/**
* package task instance, associating it with its processInstance and processDefine
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId){
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if(taskInstance == null){
return taskInstance;
}
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefineById(taskInstance.getProcessDefinitionId());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
return taskInstance;
}
/**
* get id list by task state
* @param instanceId instanceId
* @param state state
* @return task instance id list
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state){
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process instance id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
Integer count = 0;
if(processInstanceMap !=null){
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId){
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId){
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId){
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance){
return processInstanceMapper.updateById(processInstance);
}
/**
* update the process instance
* @param processInstanceId processInstanceId
* @param processJson processJson
* @param globalParams globalParams
* @param scheduleTime scheduleTime
* @param flag flag
* @param locations locations
* @param connects connects
* @return update process instance result
*/
public int updateProcessInstance(Integer processInstanceId, String processJson,
String globalParams, Date scheduleTime, Flag flag,
String locations, String connects){
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
if(processInstance!= null){
processInstance.setProcessInstanceJson(processJson);
processInstance.setGlobalParams(globalParams);
processInstance.setScheduleTime(scheduleTime);
processInstance.setLocations(locations);
processInstance.setConnects(connects);
return processInstanceMapper.updateById(processInstance);
}
return 0;
}
/**
* change task state
* @param state state
* @param endTime endTime
* @param taskInstId taskInstId
*/
public void changeTaskState(ExecutionStatus state,
Date endTime,
int taskInstId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
saveTaskInstance(taskInstance);
}
/**
* convert integer list to string list
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList){
if(intList == null){
return new ArrayList<>();
}
List<String> result = new ArrayList<String>(intList.size());
for(Integer intVar : intList){
result.add(String.valueOf(intVar));
}
return result;
}
/**
* update pid and app links field by task instance id
* @param taskInstId taskInstId
* @param pid pid
* @param appLinks appLinks
*/
public void updatePidByTaskInstId(int taskInstId, int pid,String appLinks) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setPid(pid);
taskInstance.setAppLink(appLinks);
saveTaskInstance(taskInstance);
}
/**
* query schedule by id
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query schedule list by process definition id
* @param processDefinitionId processDefinitionId
* @return schedule list
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(int processDefinitionId) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
}
/**
* query need failover process instance
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host){
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
* @param processInstance processInstance
*/
@Transactional(rollbackFor = Exception.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance){
//1 update processInstance host is null
processInstance.setHost("null");
processInstanceMapper.updateById(processInstance);
//2 insert into recover command
Command cmd = new Command();
cmd.setProcessDefinitionId(processInstance.getProcessDefinitionId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host){
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id){
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId){
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if(taskInstance!= null){
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by udf function id array
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByids(int[] ids){
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName,ResourceType resourceType){
return resourceMapper.queryTenantCodeByResourceName(resName,resourceType.ordinal());
}
/**
* find schedule list by process definition id array.
* @param ids ids
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineId(int[] ids){
return scheduleMapper.selectAllByProcessDefineArray(
ids);
}
/**
* get dependency cycle by work process define id and scheduler fire time
* @param masterId masterId
* @param processDefinitionId processDefinitionId
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency
* @throws Exception if error throws Exception
*/
public CycleDependency getCycleDependency(int masterId, int processDefinitionId, Date scheduledFireTime) throws Exception {
List<CycleDependency> list = getCycleDependencies(masterId,new int[]{processDefinitionId},scheduledFireTime);
return list.size()>0 ? list.get(0) : null;
}
/**
* get dependency cycle list by work process define id list and scheduler fire time
* @param masterId masterId
* @param ids ids
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency list
* @throws Exception if error throws Exception
*/
public List<CycleDependency> getCycleDependencies(int masterId,int[] ids,Date scheduledFireTime) throws Exception {
List<CycleDependency> cycleDependencyList = new ArrayList<CycleDependency>();
if(ArrayUtils.isEmpty(ids)){
logger.warn("ids[] is empty!is invalid!");
return cycleDependencyList;
}
if(scheduledFireTime == null){
logger.warn("scheduledFireTime is null!is invalid!");
return cycleDependencyList;
}
String strCrontab = "";
CronExpression depCronExpression;
Cron depCron;
List<Date> list;
List<Schedule> schedules = this.selectAllByProcessDefineId(ids);
// for all scheduling information
for(Schedule depSchedule:schedules){
strCrontab = depSchedule.getCrontab();
depCronExpression = CronUtils.parse2CronExpression(strCrontab);
depCron = CronUtils.parse2Cron(strCrontab);
CycleEnum cycleEnum = CronUtils.getMiniCycle(depCron);
if(cycleEnum == null){
logger.error("{} is not valid",strCrontab);
continue;
}
Calendar calendar = Calendar.getInstance();
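// the switch below widens the search window to slightly more than one full cycle
// (25 hours for hourly, 32 days for daily/weekly, 13 months for monthly crontabs) so that at
// least one earlier fire time of the dependency falls inside [start, scheduledFireTime]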
switch (cycleEnum){
/*case MINUTE:
calendar.add(Calendar.MINUTE,-61);*/
case HOUR:
calendar.add(Calendar.HOUR,-25);
break;
case DAY:
calendar.add(Calendar.DATE,-32);
break;
case WEEK:
calendar.add(Calendar.DATE,-32);
break;
case MONTH:
calendar.add(Calendar.MONTH,-13);
break;
default:
logger.warn("Dependent process definition's cycleEnum is {},not support!!", cycleEnum.name());
continue;
}
Date start = calendar.getTime();
if(depSchedule.getProcessDefinitionId() == masterId){
list = CronUtils.getSelfFireDateList(start, scheduledFireTime, depCronExpression);
}else {
list = CronUtils.getFireDateList(start, scheduledFireTime, depCronExpression);
}
if(list.size()>=1){
start = list.get(list.size()-1);
CycleDependency dependency = new CycleDependency(depSchedule.getProcessDefinitionId(),start, CronUtils.getExpirationTime(start, cycleEnum), cycleEnum);
cycleDependencyList.add(dependency);
}
}
return cycleDependencyList;
}
/**
* find last scheduler process instance in the date interval
* @param definitionId definitionId
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastRunningProcess(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastRunningProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime(),
stateArray);
}
/**
* query user queue by process instance id
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId){
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if(processInstance == null){
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if(executor != null){
queue = executor.getQueue();
}
return queue;
}
/**
* query worker group by id
* @param workerGroupId workerGroupId
* @return WorkerGroup
*/
public WorkerGroup queryWorkerGroupById(int workerGroupId){
return workerGroupMapper.selectById(workerGroupId);
}
/**
* get task worker group id
* @param taskInstance taskInstance
* @return workerGroupId
*/
public int getTaskWorkerGroupId(TaskInstance taskInstance) {
int taskWorkerGroupId = taskInstance.getWorkerGroupId();
if(taskWorkerGroupId > 0){
return taskWorkerGroupId;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if(processInstance != null){
return processInstance.getWorkerGroupId();
}
logger.info("task : {} will use default worker group id", taskInstance.getId());
return Constants.DEFAULT_WORKER_ID;
}
/**
* get have perm project list
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId){
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if(createProjects == null){
createProjects = new ArrayList<>();
}
if(authedProjects != null){
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* get have perm project ids
* @param userId userId
* @return project ids
*/
public List<Integer> getProjectIdListHavePerm(int userId){
List<Integer> projectIdList = new ArrayList<>();
for(Project project : getProjectListHavePerm(userId)){
projectIdList.add(project.getId());
}
return projectIdList;
}
/**
* list unauthorized resources (resource files, udf functions or data sources) for the given user
* @param userId user id
* @param needChecks the resource ids or names that need to be checked
* @param authorizationType authorization type
* @return unauthorized resource list
*/
public <T> List<T> listUnauthorized(int userId,T[] needChecks,AuthorizationType authorizationType){
List<T> resultList = new ArrayList<T>();
if (!ArrayUtils.isEmpty(needChecks)) {
Set<T> originResSet = new HashSet<T>(Arrays.asList(needChecks));
switch (authorizationType){
case RESOURCE_FILE_ID:
Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case UDF_FILE:
Set<Integer> authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfFiles);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId,needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
}
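// whatever is still left in originResSet was not returned by the authorized-* queries above,
// so it is unauthorized for this user and goes into the result list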
resultList.addAll(originResSet);
}
return resultList;
}
/**
* get user by user id
* @param userId user id
* @return User
*/
public User getUserById(int userId){
return userMapper.queryDetailsById(userId);
}
/**
* get resource by resource id
* @param resoruceId resource id
* @return Resource
*/
public Resource getResourceById(int resoruceId){
return resourceMapper.selectById(resoruceId);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,282 | this workflow is a bug, I think it's a very serious problem | I also found a problem, and it is a bug. I have two workflows, a and b. In workflow a I added a task node a1; in workflow b I created a new task b1 with a dependent node that depends on a1, with the dependency period set to "today". My situation is this: I had run workflow a through once the day before (so a1 last succeeded yesterday, not today). Then I ran workflow b; in theory it should have failed, but in fact it succeeded. Similarly, if I choose "yesterday" as b1's dependency period it also runs successfully, but if I choose "last three days" the run fails and the status shows failed. | https://github.com/apache/dolphinscheduler/issues/2282 | https://github.com/apache/dolphinscheduler/pull/2329 | 949b8ef17d2b734b239ce31c997d7084868347b7 | 69e000b54214e8331ede69bdce9710b8418e960e | "2020-03-23T08:00:51Z" | java | "2020-04-02T16:16:01Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.5.RELEASE</spring.version>
<spring.boot.version>2.1.3.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.2.3</quartz.version>
<jackson.version>2.9.8</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<fastjson.version>1.2.61</fastjson.version>
<druid.version>1.1.14</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.6</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.18.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>${fastjson.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/common/utils/*.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/log/*.java</include>
<include>**/common/threadutils/*.java</include>
<include>**/common/graph/*.java</include>
<include>**/common/queue/*.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/FourLetterWordTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/enums/*.java</include>
<include>**/api/controller/DataSourceControllerTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<include>**/alert/utils/PropertyUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/AlertMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/entity/TaskInstanceTest.java</include>
<include>**/dao/cron/CronUtilsTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/server/worker/task/datax/DataxTaskTest.java</include>
<include>**/server/worker/task/shell/ShellTaskTest.java</include>
<include>**/server/worker/task/sqoop/SqoopTaskTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/dao/datasource/BaseDataSourceTest.java</include>
<include>**/alert/utils/MailUtilsTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
</includes>
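<!-- note: because an explicit <includes> list is configured, surefire only executes the test
classes listed above; newly added unit tests have to be registered here as well -->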
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>**/src/sass/common/_animation.scss</exclude>
<exclude>**/src/sass/common/_normalize.scss</exclude>
<exclude>.github/**</exclude>
<exclude>sql/soft_version</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
<exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,350 | [BUG] master fault tolerance error | 1. Start one master1 and two workers
2. Set a schedule for the task (run once every minute) and bring the schedule online.
3. There are 3 workflow instances running on the master1 service, and the task shell_task_1 of the workflow instance is also in the "running" state.
4. Start the master2 service. The host of the workflow instance changes from master1 to master2 and the task shell_task_1 also goes through fault-tolerant processing, as shown in the figures below; in the end shell_task_1 is executed 3 times:
![image](https://user-images.githubusercontent.com/55787491/78106433-f4545180-7425-11ea-8bc6-ebcf916236f8.png)
![image](https://user-images.githubusercontent.com/55787491/78106449-fe765000-7425-11ea-8dd8-3c14ec90d4fb.png)
![image](https://user-images.githubusercontent.com/55787491/78106564-3d0c0a80-7426-11ea-956e-42b2b19a01cd.png)
**Which version of Dolphin Scheduler:**
-[refactor-worker]
| https://github.com/apache/dolphinscheduler/issues/2350 | https://github.com/apache/dolphinscheduler/pull/2375 | c9620ab557ec4c8d0bef1c87c93348b238a7bd8e | 785c34bc66b04dbccbd64639850dc44d67957aba | "2020-04-01T06:38:43Z" | java | "2020-04-07T08:23:31Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.zk;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.zk.AbstractZKClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.Date;
import java.util.List;
/**
* zookeeper master client
*
* single instance
*/
@Component
public class ZKMasterClient extends AbstractZKClient {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(ZKMasterClient.class);
/**
* process service
*/
@Autowired
private ProcessService processService;
public void start() {
InterProcessMutex mutex = null;
try {
// acquire a distributed startup lock so that only one master performs initialization and failover at a time
String znodeLock = getMasterStartUpLockPath();
mutex = new InterProcessMutex(getZkClient(), znodeLock);
mutex.acquire();
// init system znode
this.initSystemZNode();
// check whether failover is required: it is only performed when this is the single active master
if (getActiveMasterNum() == 1) {
failoverWorker(null, true);
failoverMaster(null);
}
}catch (Exception e){
logger.error("master start up exception",e);
}finally {
releaseMutex(mutex);
}
}
@Override
public void close(){
super.close();
}
/**
* handle path events that this class cares about
* @param client zkClient
* @param event path event
* @param path zk path
*/
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(path.startsWith(getZNodeParentPath(ZKNodeType.MASTER)+Constants.SINGLE_SLASH)){ //monitor master
handleMasterEvent(event,path);
}else if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){ //monitor worker
handleWorkerEvent(event,path);
}
//other path event, ignore
}
/**
* remove zookeeper node path
*
* @param path zookeeper node path
* @param zkNodeType zookeeper node type
* @param failover is failover
*/
private void removeZKNodePath(String path, ZKNodeType zkNodeType, boolean failover) {
logger.info("{} node deleted : {}", zkNodeType.toString(), path);
InterProcessMutex mutex = null;
try {
String failoverPath = getFailoverLockPath(zkNodeType);
// create a distributed lock
mutex = new InterProcessMutex(getZkClient(), failoverPath);
mutex.acquire();
String serverHost = getHostByEventDataPath(path);
// handle dead server
handleDeadServer(path, zkNodeType, Constants.ADD_ZK_OP);
//failover server
if(failover){
failoverServerWhenDown(serverHost, zkNodeType);
}
}catch (Exception e){
logger.error("{} server failover failed.", zkNodeType.toString());
logger.error("failover exception ",e);
}
finally {
releaseMutex(mutex);
}
}
/**
* failover server when server down
*
* @param serverHost server host
* @param zkNodeType zookeeper node type
* @throws Exception exception
*/
private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception {
if(StringUtils.isEmpty(serverHost)){
return ;
}
switch (zkNodeType){
case MASTER:
failoverMaster(serverHost);
break;
case WORKER:
                failoverWorker(serverHost, true);
                break;
default:
break;
}
}
/**
* get failover lock path
*
* @param zkNodeType zookeeper node type
* @return fail over lock path
*/
private String getFailoverLockPath(ZKNodeType zkNodeType){
switch (zkNodeType){
case MASTER:
return getMasterFailoverLockPath();
case WORKER:
return getWorkerFailoverLockPath();
default:
return "";
}
}
/**
* monitor master
* @param event event
* @param path path
*/
public void handleMasterEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("master node added : {}", path);
break;
case NODE_REMOVED:
removeZKNodePath(path, ZKNodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
* @param event event
* @param path path
*/
public void handleWorkerEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
logger.info("worker node deleted : {}", path);
removeZKNodePath(path, ZKNodeType.WORKER, true);
break;
default:
break;
}
}
/**
* task needs failover if task start before worker starts
*
* @param taskInstance task instance
* @return true if task instance need fail over
*/
private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) throws Exception {
boolean taskNeedFailover = true;
        // no host will execute this task instance now, so there is no need to failover the task
if(taskInstance.getHost() == null){
return false;
}
// if the worker node exists in zookeeper, we must check the task starts after the worker
if(checkZKNodeExists(taskInstance.getHost(), ZKNodeType.WORKER)){
//if task start after worker starts, there is no need to failover the task.
if(checkTaskAfterWorkerStart(taskInstance)){
taskNeedFailover = false;
}
}
return taskNeedFailover;
}
/**
* check task start after the worker server starts.
*
* @param taskInstance task instance
* @return true if task instance start time after worker server start date
*/
private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
if(StringUtils.isEmpty(taskInstance.getHost())){
return false;
}
Date workerServerStartDate = null;
List<Server> workerServers = getServersList(ZKNodeType.WORKER);
for(Server workerServer : workerServers){
if(workerServer.getHost().equals(taskInstance.getHost())){
workerServerStartDate = workerServer.getCreateTime();
break;
}
}
if(workerServerStartDate != null){
return taskInstance.getStartTime().after(workerServerStartDate);
}else{
return false;
}
}
/**
* failover worker tasks
*
* 1. kill yarn job if there are yarn jobs in tasks.
* 2. change task state from running to need failover.
* 3. failover all tasks when workerHost is null
* @param workerHost worker host
* @param needCheckWorkerAlive need check worker alive
* @throws Exception exception
*/
private void failoverWorker(String workerHost, boolean needCheckWorkerAlive) throws Exception {
logger.info("start worker[{}] failover ...", workerHost);
List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost);
for(TaskInstance taskInstance : needFailoverTaskInstanceList){
if(needCheckWorkerAlive){
if(!checkTaskInstanceNeedFailover(taskInstance)){
continue;
}
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(processInstance != null){
taskInstance.setProcessInstance(processInstance);
}
TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(processInstance)
.create();
            // only kill the yarn job if it exists; the local thread has already exited
ProcessUtils.killYarnJob(taskExecutionContext);
taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
processService.saveTaskInstance(taskInstance);
}
logger.info("end worker[{}] failover ...", workerHost);
}
/**
* failover master tasks
*
* @param masterHost master host
*/
private void failoverMaster(String masterHost) {
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost);
        // set the host of each process instance to null and insert a failover command
for(ProcessInstance processInstance : needFailoverProcessInstanceList){
processService.processNeedFailoverProcessInstances(processInstance);
}
logger.info("master failover end");
}
public InterProcessMutex blockAcquireMutex() throws Exception {
InterProcessMutex mutex = new InterProcessMutex(getZkClient(), getMasterLockPath());
mutex.acquire();
return mutex;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,350 | [BUG] master fault tolerance error | 1. Start one master1 and two workers
2. Task setting timing, executed once every 1 minute, online timing
3. There are 3 workflow instances running on the master1 service, and the task shell_task_1 of the workflow instance is also in the "running" state.
4. Start the master2 service, the host of the workflow instance changes from master1 to master2, and the task shell_task_1 also starts fault-tolerant processing, as shown in the following figure, and finally shell_task_1 is executed 3 times:
===============================================
1. Start one master1 and two workers
2. Set the task to run on a schedule, executed once every minute, and bring the schedule online
3. There are 3 workflow instances running on the master1 service, and the task shell_task_1 of those workflow instances is also in the "running" state
4. Start the master2 service; the host of the workflow instances changes from master1 to master2, and the task shell_task_1 also starts fault-tolerant processing, as shown in the figures below, and finally shell_task_1 is executed 3 times:
===============================================
![image](https://user-images.githubusercontent.com/55787491/78106433-f4545180-7425-11ea-8bc6-ebcf916236f8.png)
![image](https://user-images.githubusercontent.com/55787491/78106449-fe765000-7425-11ea-8dd8-3c14ec90d4fb.png)
![image](https://user-images.githubusercontent.com/55787491/78106564-3d0c0a80-7426-11ea-956e-42b2b19a01cd.png)
**Which version of Dolphin Scheduler:**
-[refactor-worker]
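
To make the failure mode above easier to reason about, here is a small self-contained sketch of the race this report describes — two masters each decide a task needs failover and both resubmit it, because the check and the resubmission are not one atomic step. The class and state names are hypothetical (plain JDK only, not DolphinScheduler code); it only illustrates why shell_task_1 can end up executed more than once.

```java
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical, simplified model of the duplicate-failover race (not DolphinScheduler classes).
public class DuplicateFailoverSketch {

    private static volatile String taskState = "RUNNING_ON_DEAD_HOST"; // how both masters see the task
    private static final AtomicInteger executions = new AtomicInteger();
    private static final CyclicBarrier bothChecked = new CyclicBarrier(2);

    public static void main(String[] args) throws Exception {
        Runnable master = () -> {
            try {
                // 1. Each master checks whether the task needs failover...
                boolean needsFailover = "RUNNING_ON_DEAD_HOST".equals(taskState);
                bothChecked.await(); // ...and both finish the check before either acts on it.
                if (needsFailover) {
                    // 2. The check and the resubmission are separate steps, so both masters resubmit.
                    executions.incrementAndGet();
                    taskState = "NEED_FAULT_TOLERANCE";
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        };
        Thread m1 = new Thread(master, "master1");
        Thread m2 = new Thread(master, "master2");
        m1.start();
        m2.start();
        m1.join();
        m2.join();
        // Prints 2: the task is resubmitted once per master instead of once in total.
        System.out.println("shell_task_1 resubmitted " + executions.get() + " time(s)");
    }
}
```

Making the state transition atomic (for example, a compare-and-set from RUNNING_ON_DEAD_HOST to NEED_FAULT_TOLERANCE before resubmitting, or performing the whole check under the failover lock) is the kind of guard that keeps failover idempotent.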
| https://github.com/apache/dolphinscheduler/issues/2350 | https://github.com/apache/dolphinscheduler/pull/2375 | c9620ab557ec4c8d0bef1c87c93348b238a7bd8e | 785c34bc66b04dbccbd64639850dc44d67957aba | "2020-04-01T06:38:43Z" | java | "2020-04-07T08:23:31Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.zk;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.ResInfo;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import java.util.*;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* abstract zookeeper client
*/
@Component
public abstract class AbstractZKClient extends ZookeeperCachedOperator {
private static final Logger logger = LoggerFactory.getLogger(AbstractZKClient.class);
/**
* remove dead server by host
* @param host host
* @param serverType serverType
* @throws Exception
*/
public void removeDeadServerByHost(String host, String serverType) throws Exception {
List<String> deadServers = super.getChildrenKeys(getDeadZNodeParentPath());
for(String serverPath : deadServers){
if(serverPath.startsWith(serverType+UNDERLINE+host)){
String server = getDeadZNodeParentPath() + SINGLE_SLASH + serverPath;
super.remove(server);
logger.info("{} server {} deleted from zk dead server path success" , serverType , host);
}
}
}
/**
* opType(add): if find dead server , then add to zk deadServerPath
* opType(delete): delete path from zk
*
* @param zNode node path
* @param zkNodeType master or worker
* @param opType delete or add
* @throws Exception errors
*/
public void handleDeadServer(String zNode, ZKNodeType zkNodeType, String opType) throws Exception {
String host = getHostByEventDataPath(zNode);
String type = (zkNodeType == ZKNodeType.MASTER) ? MASTER_PREFIX : WORKER_PREFIX;
        // check for server restart: if the server restarted, its dead-server path in zk should be deleted
if(opType.equals(DELETE_ZK_OP)){
removeDeadServerByHost(host, type);
}else if(opType.equals(ADD_ZK_OP)){
String deadServerPath = getDeadZNodeParentPath() + SINGLE_SLASH + type + UNDERLINE + host;
if(!super.isExisted(deadServerPath)){
//add dead server info to zk dead server path : /dead-servers/
super.persist(deadServerPath,(type + UNDERLINE + host));
logger.info("{} server dead , and {} added to zk dead server path success" ,
zkNodeType.toString(), zNode);
}
}
}
/**
* get active master num
* @return active master number
*/
public int getActiveMasterNum(){
List<String> childrenList = new ArrayList<>();
try {
// read master node parent path from conf
if(super.isExisted(getZNodeParentPath(ZKNodeType.MASTER))){
childrenList = super.getChildrenKeys(getZNodeParentPath(ZKNodeType.MASTER));
}
} catch (Exception e) {
logger.error("getActiveMasterNum error",e);
}
return childrenList.size();
}
/**
*
* @return zookeeper quorum
*/
public String getZookeeperQuorum(){
return getZookeeperConfig().getServerList();
}
/**
* get server list.
* @param zkNodeType zookeeper node type
* @return server list
*/
public List<Server> getServersList(ZKNodeType zkNodeType){
Map<String, String> masterMap = getServerMaps(zkNodeType);
String parentPath = getZNodeParentPath(zkNodeType);
List<Server> masterServers = new ArrayList<>();
int i = 0;
for (Map.Entry<String, String> entry : masterMap.entrySet()) {
Server masterServer = ResInfo.parseHeartbeatForZKInfo(entry.getValue());
masterServer.setZkDirectory( parentPath + "/"+ entry.getKey());
masterServer.setId(i);
i ++;
masterServers.add(masterServer);
}
return masterServers;
}
/**
* get master server list map.
* @param zkNodeType zookeeper node type
* @return result : {host : resource info}
*/
public Map<String, String> getServerMaps(ZKNodeType zkNodeType){
Map<String, String> masterMap = new HashMap<>();
try {
String path = getZNodeParentPath(zkNodeType);
List<String> serverList = super.getChildrenKeys(path);
for(String server : serverList){
masterMap.putIfAbsent(server, super.get(path + "/" + server));
}
} catch (Exception e) {
logger.error("get server list failed", e);
}
return masterMap;
}
/**
* check the zookeeper node already exists
* @param host host
* @param zkNodeType zookeeper node type
* @return true if exists
*/
public boolean checkZKNodeExists(String host, ZKNodeType zkNodeType) {
String path = getZNodeParentPath(zkNodeType);
if(StringUtils.isEmpty(path)){
logger.error("check zk node exists error, host:{}, zk node type:{}",
host, zkNodeType.toString());
return false;
}
Map<String, String> serverMaps = getServerMaps(zkNodeType);
for(String hostKey : serverMaps.keySet()){
if(hostKey.startsWith(host)){
return true;
}
}
return false;
}
/**
*
* @return get worker node parent path
*/
protected String getWorkerZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
}
/**
*
* @return get master node parent path
*/
protected String getMasterZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_MASTERS;
}
/**
*
* @return get master lock path
*/
public String getMasterLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS;
}
/**
*
* @param zkNodeType zookeeper node type
* @return get zookeeper node parent path
*/
public String getZNodeParentPath(ZKNodeType zkNodeType) {
String path = "";
switch (zkNodeType){
case MASTER:
return getMasterZNodeParentPath();
case WORKER:
return getWorkerZNodeParentPath();
case DEAD_SERVER:
return getDeadZNodeParentPath();
default:
break;
}
return path;
}
/**
*
* @return get dead server node parent path
*/
protected String getDeadZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS;
}
/**
*
* @return get master start up lock path
*/
public String getMasterStartUpLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS;
}
/**
*
* @return get master failover lock path
*/
public String getMasterFailoverLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS;
}
/**
*
* @return get worker failover lock path
*/
public String getWorkerFailoverLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS;
}
/**
* release mutex
* @param mutex mutex
*/
public void releaseMutex(InterProcessMutex mutex) {
if (mutex != null){
try {
mutex.release();
} catch (Exception e) {
if(e.getMessage().equals("instance must be started before calling this method")){
logger.warn("lock release");
}else{
logger.error("lock release failed",e);
}
}
}
}
/**
* init system znode
*/
protected void initSystemZNode(){
try {
persist(getMasterZNodeParentPath(), "");
persist(getWorkerZNodeParentPath(), "");
persist(getDeadZNodeParentPath(), "");
logger.info("initialize server nodes success.");
} catch (Exception e) {
logger.error("init system znode failed",e);
}
}
/**
* get host ip, string format: masterParentPath/ip
* @param path path
* @return host ip, string format: masterParentPath/ip
*/
protected String getHostByEventDataPath(String path) {
if(StringUtils.isEmpty(path)){
logger.error("empty path!");
return "";
}
String[] pathArray = path.split(SINGLE_SLASH);
if(pathArray.length < 1){
logger.error("parse ip error: {}", path);
return "";
}
return pathArray[pathArray.length - 1];
}
@Override
public String toString() {
return "AbstractZKClient{" +
"zkClient=" + zkClient +
", deadServerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.DEAD_SERVER) + '\'' +
", masterZNodeParentPath='" + getZNodeParentPath(ZKNodeType.MASTER) + '\'' +
", workerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.WORKER) + '\'' +
'}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,349 | [BUG]Visit the worker page of the monitoring center, a null pointer occur |
![image](https://user-images.githubusercontent.com/55787491/78097549-2a3a0b80-740f-11ea-8302-5c63fa1b79dc.png)
![image](https://user-images.githubusercontent.com/55787491/78097589-3faf3580-740f-11ea-82d2-4249cef0e876.png)
**Which version of Dolphin Scheduler:**
-[refactor-worker]
| https://github.com/apache/dolphinscheduler/issues/2349 | https://github.com/apache/dolphinscheduler/pull/2375 | c9620ab557ec4c8d0bef1c87c93348b238a7bd8e | 785c34bc66b04dbccbd64639850dc44d67957aba | "2020-04-01T03:53:07Z" | java | "2020-04-07T08:23:31Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/zk/ZKMasterClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.zk;
import org.apache.commons.lang.StringUtils;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.TreeCacheEvent;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.entity.TaskExecutionContext;
import org.apache.dolphinscheduler.server.utils.ProcessUtils;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.zk.AbstractZKClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import java.util.Date;
import java.util.List;
/**
* zookeeper master client
*
* single instance
*/
@Component
public class ZKMasterClient extends AbstractZKClient {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(ZKMasterClient.class);
/**
* process service
*/
@Autowired
private ProcessService processService;
public void start() {
InterProcessMutex mutex = null;
try {
// create distributed lock with the root node path of the lock space as /dolphinscheduler/lock/failover/master
String znodeLock = getMasterStartUpLockPath();
mutex = new InterProcessMutex(getZkClient(), znodeLock);
mutex.acquire();
// init system znode
this.initSystemZNode();
            // check whether failover is required
if (getActiveMasterNum() == 1) {
failoverWorker(null, true);
failoverMaster(null);
}
}catch (Exception e){
logger.error("master start up exception",e);
}finally {
releaseMutex(mutex);
}
}
@Override
public void close(){
super.close();
}
/**
* handle path events that this class cares about
* @param client zkClient
* @param event path event
* @param path zk path
*/
@Override
protected void dataChanged(CuratorFramework client, TreeCacheEvent event, String path) {
if(path.startsWith(getZNodeParentPath(ZKNodeType.MASTER)+Constants.SINGLE_SLASH)){ //monitor master
handleMasterEvent(event,path);
}else if(path.startsWith(getZNodeParentPath(ZKNodeType.WORKER)+Constants.SINGLE_SLASH)){ //monitor worker
handleWorkerEvent(event,path);
}
//other path event, ignore
}
/**
* remove zookeeper node path
*
* @param path zookeeper node path
* @param zkNodeType zookeeper node type
* @param failover is failover
*/
private void removeZKNodePath(String path, ZKNodeType zkNodeType, boolean failover) {
logger.info("{} node deleted : {}", zkNodeType.toString(), path);
InterProcessMutex mutex = null;
try {
String failoverPath = getFailoverLockPath(zkNodeType);
// create a distributed lock
mutex = new InterProcessMutex(getZkClient(), failoverPath);
mutex.acquire();
String serverHost = getHostByEventDataPath(path);
// handle dead server
handleDeadServer(path, zkNodeType, Constants.ADD_ZK_OP);
//failover server
if(failover){
failoverServerWhenDown(serverHost, zkNodeType);
}
}catch (Exception e){
logger.error("{} server failover failed.", zkNodeType.toString());
logger.error("failover exception ",e);
}
finally {
releaseMutex(mutex);
}
}
/**
* failover server when server down
*
* @param serverHost server host
* @param zkNodeType zookeeper node type
* @throws Exception exception
*/
private void failoverServerWhenDown(String serverHost, ZKNodeType zkNodeType) throws Exception {
if(StringUtils.isEmpty(serverHost)){
return ;
}
switch (zkNodeType){
case MASTER:
failoverMaster(serverHost);
break;
case WORKER:
                failoverWorker(serverHost, true);
                break;
default:
break;
}
}
/**
* get failover lock path
*
* @param zkNodeType zookeeper node type
* @return fail over lock path
*/
private String getFailoverLockPath(ZKNodeType zkNodeType){
switch (zkNodeType){
case MASTER:
return getMasterFailoverLockPath();
case WORKER:
return getWorkerFailoverLockPath();
default:
return "";
}
}
/**
* monitor master
* @param event event
* @param path path
*/
public void handleMasterEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("master node added : {}", path);
break;
case NODE_REMOVED:
removeZKNodePath(path, ZKNodeType.MASTER, true);
break;
default:
break;
}
}
/**
* monitor worker
* @param event event
* @param path path
*/
public void handleWorkerEvent(TreeCacheEvent event, String path){
switch (event.getType()) {
case NODE_ADDED:
logger.info("worker node added : {}", path);
break;
case NODE_REMOVED:
logger.info("worker node deleted : {}", path);
removeZKNodePath(path, ZKNodeType.WORKER, true);
break;
default:
break;
}
}
/**
* task needs failover if task start before worker starts
*
* @param taskInstance task instance
* @return true if task instance need fail over
*/
private boolean checkTaskInstanceNeedFailover(TaskInstance taskInstance) throws Exception {
boolean taskNeedFailover = true;
        // no host will execute this task instance now, so there is no need to failover the task
if(taskInstance.getHost() == null){
return false;
}
// if the worker node exists in zookeeper, we must check the task starts after the worker
if(checkZKNodeExists(taskInstance.getHost(), ZKNodeType.WORKER)){
//if task start after worker starts, there is no need to failover the task.
if(checkTaskAfterWorkerStart(taskInstance)){
taskNeedFailover = false;
}
}
return taskNeedFailover;
}
/**
* check task start after the worker server starts.
*
* @param taskInstance task instance
* @return true if task instance start time after worker server start date
*/
private boolean checkTaskAfterWorkerStart(TaskInstance taskInstance) {
if(StringUtils.isEmpty(taskInstance.getHost())){
return false;
}
Date workerServerStartDate = null;
List<Server> workerServers = getServersList(ZKNodeType.WORKER);
for(Server workerServer : workerServers){
if(workerServer.getHost().equals(taskInstance.getHost())){
workerServerStartDate = workerServer.getCreateTime();
break;
}
}
if(workerServerStartDate != null){
return taskInstance.getStartTime().after(workerServerStartDate);
}else{
return false;
}
}
/**
* failover worker tasks
*
* 1. kill yarn job if there are yarn jobs in tasks.
* 2. change task state from running to need failover.
* 3. failover all tasks when workerHost is null
* @param workerHost worker host
* @param needCheckWorkerAlive need check worker alive
* @throws Exception exception
*/
private void failoverWorker(String workerHost, boolean needCheckWorkerAlive) throws Exception {
logger.info("start worker[{}] failover ...", workerHost);
List<TaskInstance> needFailoverTaskInstanceList = processService.queryNeedFailoverTaskInstances(workerHost);
for(TaskInstance taskInstance : needFailoverTaskInstanceList){
if(needCheckWorkerAlive){
if(!checkTaskInstanceNeedFailover(taskInstance)){
continue;
}
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
if(processInstance != null){
taskInstance.setProcessInstance(processInstance);
}
TaskExecutionContext taskExecutionContext = TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(processInstance)
.create();
            // only kill the yarn job if it exists; the local thread has already exited
ProcessUtils.killYarnJob(taskExecutionContext);
taskInstance.setState(ExecutionStatus.NEED_FAULT_TOLERANCE);
processService.saveTaskInstance(taskInstance);
}
logger.info("end worker[{}] failover ...", workerHost);
}
/**
* failover master tasks
*
* @param masterHost master host
*/
private void failoverMaster(String masterHost) {
logger.info("start master failover ...");
List<ProcessInstance> needFailoverProcessInstanceList = processService.queryNeedFailoverProcessInstances(masterHost);
        // set the host of each process instance to null and insert a failover command
for(ProcessInstance processInstance : needFailoverProcessInstanceList){
processService.processNeedFailoverProcessInstances(processInstance);
}
logger.info("master failover end");
}
public InterProcessMutex blockAcquireMutex() throws Exception {
InterProcessMutex mutex = new InterProcessMutex(getZkClient(), getMasterLockPath());
mutex.acquire();
return mutex;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,349 | [BUG]Visit the worker page of the monitoring center, a null pointer occur |
![image](https://user-images.githubusercontent.com/55787491/78097549-2a3a0b80-740f-11ea-8302-5c63fa1b79dc.png)
![image](https://user-images.githubusercontent.com/55787491/78097589-3faf3580-740f-11ea-82d2-4249cef0e876.png)
**Which version of Dolphin Scheduler:**
-[refactor-worker]
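
The worker page of the monitoring center is built from `getServersList(ZKNodeType.WORKER)` in the AbstractZKClient source attached to this row, which turns each heartbeat string stored in ZooKeeper into a `Server` via `ResInfo.parseHeartbeatForZKInfo(...)`. The report contains no stack trace, so the sketch below is only a general illustration (hypothetical heartbeat format, not the project's actual parser) of how a missing or truncated heartbeat entry becomes a null that a monitor page can later trip over, and how a defensive check avoids it.

```java
// Illustration only: defensive parsing of a comma-separated heartbeat entry (hypothetical format).
public class HeartbeatParseSketch {

    static Double parseLoadAverage(String heartbeat) {
        if (heartbeat == null) {
            return null; // the znode existed but carried no data
        }
        String[] parts = heartbeat.split(",");
        // A truncated entry (for example, written by an older server version) has fewer fields than expected.
        return parts.length > 2 ? Double.valueOf(parts[2]) : null;
    }

    public static void main(String[] args) {
        Double healthy = parseLoadAverage("0.5,3.2,0.7");
        Double truncated = parseLoadAverage("0.5");

        System.out.println("healthy load: " + healthy);
        // Guarding before dereferencing avoids the NullPointerException that an
        // unconditional truncated.doubleValue() call would throw here.
        System.out.println("truncated load: " + (truncated == null ? "unknown" : truncated));
    }
}
```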
| https://github.com/apache/dolphinscheduler/issues/2349 | https://github.com/apache/dolphinscheduler/pull/2375 | c9620ab557ec4c8d0bef1c87c93348b238a7bd8e | 785c34bc66b04dbccbd64639850dc44d67957aba | "2020-04-01T03:53:07Z" | java | "2020-04-07T08:23:31Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/zk/AbstractZKClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.zk;
import org.apache.curator.framework.recipes.locks.InterProcessMutex;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ZKNodeType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.ResInfo;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import java.util.*;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* abstract zookeeper client
*/
@Component
public abstract class AbstractZKClient extends ZookeeperCachedOperator {
private static final Logger logger = LoggerFactory.getLogger(AbstractZKClient.class);
/**
* remove dead server by host
* @param host host
* @param serverType serverType
* @throws Exception
*/
public void removeDeadServerByHost(String host, String serverType) throws Exception {
List<String> deadServers = super.getChildrenKeys(getDeadZNodeParentPath());
for(String serverPath : deadServers){
if(serverPath.startsWith(serverType+UNDERLINE+host)){
String server = getDeadZNodeParentPath() + SINGLE_SLASH + serverPath;
super.remove(server);
logger.info("{} server {} deleted from zk dead server path success" , serverType , host);
}
}
}
/**
* opType(add): if find dead server , then add to zk deadServerPath
* opType(delete): delete path from zk
*
* @param zNode node path
* @param zkNodeType master or worker
* @param opType delete or add
* @throws Exception errors
*/
public void handleDeadServer(String zNode, ZKNodeType zkNodeType, String opType) throws Exception {
String host = getHostByEventDataPath(zNode);
String type = (zkNodeType == ZKNodeType.MASTER) ? MASTER_PREFIX : WORKER_PREFIX;
        // check for server restart: if the server restarted, its dead-server path in zk should be deleted
if(opType.equals(DELETE_ZK_OP)){
removeDeadServerByHost(host, type);
}else if(opType.equals(ADD_ZK_OP)){
String deadServerPath = getDeadZNodeParentPath() + SINGLE_SLASH + type + UNDERLINE + host;
if(!super.isExisted(deadServerPath)){
//add dead server info to zk dead server path : /dead-servers/
super.persist(deadServerPath,(type + UNDERLINE + host));
logger.info("{} server dead , and {} added to zk dead server path success" ,
zkNodeType.toString(), zNode);
}
}
}
/**
* get active master num
* @return active master number
*/
public int getActiveMasterNum(){
List<String> childrenList = new ArrayList<>();
try {
// read master node parent path from conf
if(super.isExisted(getZNodeParentPath(ZKNodeType.MASTER))){
childrenList = super.getChildrenKeys(getZNodeParentPath(ZKNodeType.MASTER));
}
} catch (Exception e) {
logger.error("getActiveMasterNum error",e);
}
return childrenList.size();
}
/**
*
* @return zookeeper quorum
*/
public String getZookeeperQuorum(){
return getZookeeperConfig().getServerList();
}
/**
* get server list.
* @param zkNodeType zookeeper node type
* @return server list
*/
public List<Server> getServersList(ZKNodeType zkNodeType){
Map<String, String> masterMap = getServerMaps(zkNodeType);
String parentPath = getZNodeParentPath(zkNodeType);
List<Server> masterServers = new ArrayList<>();
int i = 0;
for (Map.Entry<String, String> entry : masterMap.entrySet()) {
Server masterServer = ResInfo.parseHeartbeatForZKInfo(entry.getValue());
masterServer.setZkDirectory( parentPath + "/"+ entry.getKey());
masterServer.setId(i);
i ++;
masterServers.add(masterServer);
}
return masterServers;
}
/**
* get master server list map.
* @param zkNodeType zookeeper node type
* @return result : {host : resource info}
*/
public Map<String, String> getServerMaps(ZKNodeType zkNodeType){
Map<String, String> masterMap = new HashMap<>();
try {
String path = getZNodeParentPath(zkNodeType);
List<String> serverList = super.getChildrenKeys(path);
for(String server : serverList){
masterMap.putIfAbsent(server, super.get(path + "/" + server));
}
} catch (Exception e) {
logger.error("get server list failed", e);
}
return masterMap;
}
/**
* check the zookeeper node already exists
* @param host host
* @param zkNodeType zookeeper node type
* @return true if exists
*/
public boolean checkZKNodeExists(String host, ZKNodeType zkNodeType) {
String path = getZNodeParentPath(zkNodeType);
if(StringUtils.isEmpty(path)){
logger.error("check zk node exists error, host:{}, zk node type:{}",
host, zkNodeType.toString());
return false;
}
Map<String, String> serverMaps = getServerMaps(zkNodeType);
for(String hostKey : serverMaps.keySet()){
if(hostKey.startsWith(host)){
return true;
}
}
return false;
}
/**
*
* @return get worker node parent path
*/
protected String getWorkerZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_WORKERS;
}
/**
*
* @return get master node parent path
*/
protected String getMasterZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_MASTERS;
}
/**
*
* @return get master lock path
*/
public String getMasterLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_MASTERS;
}
/**
*
* @param zkNodeType zookeeper node type
* @return get zookeeper node parent path
*/
public String getZNodeParentPath(ZKNodeType zkNodeType) {
String path = "";
switch (zkNodeType){
case MASTER:
return getMasterZNodeParentPath();
case WORKER:
return getWorkerZNodeParentPath();
case DEAD_SERVER:
return getDeadZNodeParentPath();
default:
break;
}
return path;
}
/**
*
* @return get dead server node parent path
*/
protected String getDeadZNodeParentPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_DEAD_SERVERS;
}
/**
*
* @return get master start up lock path
*/
public String getMasterStartUpLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS;
}
/**
*
* @return get master failover lock path
*/
public String getMasterFailoverLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS;
}
/**
*
* @return get worker failover lock path
*/
public String getWorkerFailoverLockPath(){
return getZookeeperConfig().getDsRoot() + Constants.ZOOKEEPER_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS;
}
/**
* release mutex
* @param mutex mutex
*/
public void releaseMutex(InterProcessMutex mutex) {
if (mutex != null){
try {
mutex.release();
} catch (Exception e) {
if(e.getMessage().equals("instance must be started before calling this method")){
logger.warn("lock release");
}else{
logger.error("lock release failed",e);
}
}
}
}
/**
* init system znode
*/
protected void initSystemZNode(){
try {
persist(getMasterZNodeParentPath(), "");
persist(getWorkerZNodeParentPath(), "");
persist(getDeadZNodeParentPath(), "");
logger.info("initialize server nodes success.");
} catch (Exception e) {
logger.error("init system znode failed",e);
}
}
/**
* get host ip, string format: masterParentPath/ip
* @param path path
* @return host ip, string format: masterParentPath/ip
*/
protected String getHostByEventDataPath(String path) {
if(StringUtils.isEmpty(path)){
logger.error("empty path!");
return "";
}
String[] pathArray = path.split(SINGLE_SLASH);
if(pathArray.length < 1){
logger.error("parse ip error: {}", path);
return "";
}
return pathArray[pathArray.length - 1];
}
@Override
public String toString() {
return "AbstractZKClient{" +
"zkClient=" + zkClient +
", deadServerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.DEAD_SERVER) + '\'' +
", masterZNodeParentPath='" + getZNodeParentPath(ZKNodeType.MASTER) + '\'' +
", workerZNodeParentPath='" + getZNodeParentPath(ZKNodeType.WORKER) + '\'' +
'}';
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,450 | [BUG] cannot complement data for one day |
cannot complement data for one day.
![image](https://user-images.githubusercontent.com/29528966/79549206-443f4380-80c9-11ea-9ec9-678b65095969.png)
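
For context, the complement logic in the ExecutorService source attached to this row only builds complement commands inside `if (null != start && null != end && start.before(end))`. The stand-alone sketch below (plain JDK dates, nothing project-specific) shows why a one-day range, where the scheduled start and end fall on the same day, fails that strict check, while the non-strict `!start.after(end)` form used by the loop-by-day branch would accept it.

```java
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

// Minimal sketch of the date check applied to a one-day complement range (illustration only).
public class OneDayComplementSketch {

    public static void main(String[] args) throws ParseException {
        SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        Date start = fmt.parse("2020-04-15 00:00:00");
        Date end   = fmt.parse("2020-04-15 00:00:00"); // complement "for one day": same start and end

        // The strict check rejects equal dates, so no complement command is created.
        System.out.println("start.before(end) = " + start.before(end));  // false

        // A non-strict check (start <= end) keeps the one-day case.
        System.out.println("!start.after(end) = " + !start.after(end));  // true
    }
}
```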
| https://github.com/apache/dolphinscheduler/issues/2450 | https://github.com/apache/dolphinscheduler/pull/2493 | 2f5427ca33b8e929d58412699699141191c640b4 | b5fdce4164385f1df2ddb25c9441b50ea28ce681 | "2020-04-17T08:35:59Z" | java | "2020-04-23T02:23:32Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ExecutorService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.text.ParseException;
import java.util.*;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* executor service
*/
@Service
public class ExecutorService extends BaseService{
private static final Logger logger = LoggerFactory.getLogger(ExecutorService.class);
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
@Autowired
private ProcessDefinitionService processDefinitionService;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private ProcessService processService;
/**
* execute process instance
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process Definition Id
* @param cronTime cron time
* @param commandType command type
     * @param failureStrategy failure strategy
* @param startNodeList start nodelist
* @param taskDependType node dependency type
* @param warningType warning type
* @param warningGroupId notify group id
* @param receivers receivers
* @param receiversCc receivers cc
* @param processInstancePriority process instance priority
* @param workerGroup worker group name
* @param runMode run mode
* @param timeout timeout
* @return execute process instance code
* @throws ParseException Parse Exception
*/
public Map<String, Object> execProcessInstance(User loginUser, String projectName,
int processDefinitionId, String cronTime, CommandType commandType,
FailureStrategy failureStrategy, String startNodeList,
TaskDependType taskDependType, WarningType warningType, int warningGroupId,
String receivers, String receiversCc, RunMode runMode,
Priority processInstancePriority, String workerGroup, Integer timeout) throws ParseException {
Map<String, Object> result = new HashMap<>(5);
// timeout is invalid
if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
putMsg(result,Status.TASK_TIMEOUT_PARAMS_ERROR);
return result;
}
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResultAndAuth = checkResultAndAuth(loginUser, projectName, project);
if (checkResultAndAuth != null){
return checkResultAndAuth;
}
// check process define release state
ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefinitionId);
result = checkProcessDefinitionValid(processDefinition, processDefinitionId);
if(result.get(Constants.STATUS) != Status.SUCCESS){
return result;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
return result;
}
/**
* create command
*/
int create = this.createCommand(commandType, processDefinitionId,
taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
warningGroupId, runMode,processInstancePriority, workerGroup);
if(create > 0 ){
/**
             * update the receivers and CC recipients of the process definition according to its ID
*/
processDefinition.setReceivers(receivers);
processDefinition.setReceiversCc(receiversCc);
processDefinitionMapper.updateById(processDefinition);
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check whether the process definition can be executed
*
* @param processDefinition process definition
* @param processDefineId process definition id
* @return check result code
*/
public Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, int processDefineId){
Map<String, Object> result = new HashMap<>(5);
if (processDefinition == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST,processDefineId);
} else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
// check process definition online
putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE,processDefineId);
}else{
result.put(Constants.STATUS, Status.SUCCESS);
}
return result;
}
/**
* do action to process instance:pause, stop, repeat, recover from pause, recover from stop
*
* @param loginUser login user
* @param projectName project name
* @param processInstanceId process instance id
* @param executeType execute type
* @return execute result code
*/
public Map<String, Object> execute(User loginUser, String projectName, Integer processInstanceId, ExecuteType executeType) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = checkResultAndAuth(loginUser, projectName, project);
if (checkResult != null) {
return checkResult;
}
ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
if (processInstance == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
return result;
}
ProcessDefinition processDefinition = processService.findProcessDefineById(processInstance.getProcessDefinitionId());
if(executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE){
result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionId());
if (result.get(Constants.STATUS) != Status.SUCCESS) {
return result;
}
}
checkResult = checkExecuteType(processInstance, executeType);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
}
if (!checkTenantSuitable(processDefinition)){
logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
processDefinition.getId(), processDefinition.getName());
putMsg(result, Status.TENANT_NOT_SUITABLE);
}
switch (executeType) {
case REPEAT_RUNNING:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.REPEAT_RUNNING);
break;
case RECOVER_SUSPENDED_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.RECOVER_SUSPENDED_PROCESS);
break;
case START_FAILURE_TASK_PROCESS:
result = insertCommand(loginUser, processInstanceId, processDefinition.getId(), CommandType.START_FAILURE_TASK_PROCESS);
break;
case STOP:
if (processInstance.getState() == ExecutionStatus.READY_STOP) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP);
}
break;
case PAUSE:
if (processInstance.getState() == ExecutionStatus.READY_PAUSE) {
putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState());
} else {
result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE);
}
break;
default:
logger.error("unknown execute type : {}", executeType);
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type");
break;
}
return result;
}
/**
* check tenant suitable
* @param processDefinition process definition
* @return true if tenant suitable, otherwise return false
*/
private boolean checkTenantSuitable(ProcessDefinition processDefinition) {
// checkTenantExists();
Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(),
processDefinition.getUserId());
return tenant != null;
}
/**
* Check the state of process instance and the type of operation match
*
* @param processInstance process instance
* @param executeType execute type
* @return check result code
*/
private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) {
Map<String, Object> result = new HashMap<>(5);
ExecutionStatus executionStatus = processInstance.getState();
boolean checkResult = false;
switch (executeType) {
case PAUSE:
case STOP:
if (executionStatus.typeIsRunning()) {
checkResult = true;
}
break;
case REPEAT_RUNNING:
if (executionStatus.typeIsFinished()) {
checkResult = true;
}
break;
case START_FAILURE_TASK_PROCESS:
if (executionStatus.typeIsFailure()) {
checkResult = true;
}
break;
case RECOVER_SUSPENDED_PROCESS:
if (executionStatus.typeIsPause()|| executionStatus.typeIsCancel()) {
checkResult = true;
}
break;
default:
break;
}
if (!checkResult) {
putMsg(result,Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString());
} else {
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* prepare to update process instance command type and status
*
* @param processInstance process instance
* @param commandType command type
* @param executionStatus execute status
* @return update result
*/
private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) {
Map<String, Object> result = new HashMap<>(5);
processInstance.setCommandType(commandType);
processInstance.addHistoryCmd(commandType);
processInstance.setState(executionStatus);
int update = processService.updateProcessInstance(processInstance);
        // determine whether the update succeeded
if (update > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* insert command, used in the implementation of the page, re run, recovery (pause / failure) execution
*
* @param loginUser login user
* @param instanceId instance id
* @param processDefinitionId process definition id
* @param commandType command type
* @return insert result code
*/
private Map<String, Object> insertCommand(User loginUser, Integer instanceId, Integer processDefinitionId, CommandType commandType) {
Map<String, Object> result = new HashMap<>(5);
Command command = new Command();
command.setCommandType(commandType);
command.setProcessDefinitionId(processDefinitionId);
command.setCommandParam(String.format("{\"%s\":%d}",
CMDPARAM_RECOVER_PROCESS_ID_STRING, instanceId));
command.setExecutorId(loginUser.getId());
if(!processService.verifyIsNeedCreateCommand(command)){
putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND,processDefinitionId);
return result;
}
int create = processService.createCommand(command);
if (create > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR);
}
return result;
}
/**
* check if sub processes are offline before starting process definition
* @param processDefineId process definition id
* @return check result code
*/
public Map<String, Object> startCheckByProcessDefinedId(int processDefineId) {
Map<String, Object> result = new HashMap<>();
if (processDefineId == 0){
logger.error("process definition id is null");
putMsg(result,Status.REQUEST_PARAMS_NOT_VALID_ERROR,"process definition id");
}
List<Integer> ids = new ArrayList<>();
processService.recurseFindSubProcessId(processDefineId, ids);
Integer[] idArray = ids.toArray(new Integer[ids.size()]);
if (!ids.isEmpty()){
List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(idArray);
if (processDefinitionList != null){
for (ProcessDefinition processDefinition : processDefinitionList){
/**
* if there is no online process, exit directly
*/
if (processDefinition.getReleaseState() != ReleaseState.ONLINE){
putMsg(result,Status.PROCESS_DEFINE_NOT_RELEASE, processDefinition.getName());
logger.info("not release process definition id: {} , name : {}",
processDefinition.getId(), processDefinition.getName());
return result;
}
}
}
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
     * query recipients and CC recipients by process definition id or processInstanceId
*
* @param processDefineId process definition id
* @param processInstanceId process instance id
* @return receivers cc list
*/
public Map<String, Object> getReceiverCc(Integer processDefineId,Integer processInstanceId) {
Map<String, Object> result = new HashMap<>();
logger.info("processInstanceId {}",processInstanceId);
if(processDefineId == null && processInstanceId == null){
throw new RuntimeException("You must set values for parameters processDefineId or processInstanceId");
}
if(processDefineId == null && processInstanceId != null) {
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if (processInstance == null) {
throw new RuntimeException("processInstanceId is not exists");
}
processDefineId = processInstance.getProcessDefinitionId();
}
ProcessDefinition processDefinition = processDefinitionMapper.selectById(processDefineId);
if (processDefinition == null){
throw new RuntimeException(String.format("processDefineId %d is not exists",processDefineId));
}
String receivers = processDefinition.getReceivers();
String receiversCc = processDefinition.getReceiversCc();
Map<String,String> dataMap = new HashMap<>();
dataMap.put(Constants.RECEIVERS,receivers);
dataMap.put(Constants.RECEIVERS_CC,receiversCc);
result.put(Constants.DATA_LIST, dataMap);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* create command
* @param commandType commandType
* @param processDefineId processDefineId
* @param nodeDep nodeDep
* @param failureStrategy failureStrategy
* @param startNodeList startNodeList
* @param schedule schedule
* @param warningType warningType
* @param executorId executorId
* @param warningGroupId warningGroupId
* @param runMode runMode
* @param processInstancePriority processInstancePriority
* @param workerGroup workerGroup
* @return command id
* @throws ParseException
*/
private int createCommand(CommandType commandType, int processDefineId,
TaskDependType nodeDep, FailureStrategy failureStrategy,
String startNodeList, String schedule, WarningType warningType,
int executorId, int warningGroupId,
RunMode runMode,Priority processInstancePriority, String workerGroup) throws ParseException {
/**
* instantiate command schedule instance
*/
Command command = new Command();
Map<String,String> cmdParam = new HashMap<>();
if(commandType == null){
command.setCommandType(CommandType.START_PROCESS);
}else{
command.setCommandType(commandType);
}
command.setProcessDefinitionId(processDefineId);
if(nodeDep != null){
command.setTaskDependType(nodeDep);
}
if(failureStrategy != null){
command.setFailureStrategy(failureStrategy);
}
if(StringUtils.isNotEmpty(startNodeList)){
cmdParam.put(CMDPARAM_START_NODE_NAMES, startNodeList);
}
if(warningType != null){
command.setWarningType(warningType);
}
command.setCommandParam(JSONUtils.toJson(cmdParam));
command.setExecutorId(executorId);
command.setWarningGroupId(warningGroupId);
command.setProcessInstancePriority(processInstancePriority);
command.setWorkerGroup(workerGroup);
Date start = null;
Date end = null;
if(StringUtils.isNotEmpty(schedule)){
String[] interval = schedule.split(",");
if(interval.length == 2){
start = DateUtils.getScheduleDate(interval[0]);
end = DateUtils.getScheduleDate(interval[1]);
}
}
// determine whether to complement
if(commandType == CommandType.COMPLEMENT_DATA){
runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode;
if(null != start && null != end && start.before(end)){
if(runMode == RunMode.RUN_MODE_SERIAL){
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end));
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processService.createCommand(command);
}else if (runMode == RunMode.RUN_MODE_PARALLEL){
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefineId);
List<Date> listDate = new LinkedList<>();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule item : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(start, end, item.getCrontab()));
}
}
if(!CollectionUtils.isEmpty(listDate)){
// loop by schedule date
for (Date date : listDate) {
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(date));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(date));
command.setCommandParam(JSONUtils.toJson(cmdParam));
processService.createCommand(command);
}
return listDate.size();
}else{
// loop by day
                        int runCount = 0;
while(!start.after(end)) {
                            runCount += 1;
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start));
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start));
command.setCommandParam(JSONUtils.toJson(cmdParam));
processService.createCommand(command);
start = DateUtils.getSomeDay(start, 1);
}
                        return runCount;
}
}
}else{
logger.error("there is not valid schedule date for the process definition: id:{},date:{}",
processDefineId, schedule);
}
}else{
command.setCommandParam(JSONUtils.toJson(cmdParam));
return processService.createCommand(command);
}
return 0;
}
/**
* check result and auth
*
* @param loginUser
* @param projectName
* @param project
* @return
*/
private Map<String, Object> checkResultAndAuth(User loginUser, String projectName, Project project) {
// check project auth
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status status = (Status) checkResult.get(Constants.STATUS);
if (status != Status.SUCCESS) {
return checkResult;
}
return null;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,492 | [BUG] sh stop-all.sh, the master service is not killed | 1. sh stop-all.sh
2. Check the master service; the master service is not killed
**Which version of Dolphin Scheduler:**
-[dev]
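
The dolphinscheduler-daemon.sh attached to this row stops a service by reading `$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-<command>.pid` and probing the process with `kill -0` before and after sending the kill signal. For readers more comfortable in Java, the sketch below is a rough equivalent of that liveness probe on Java 11+ (the pid-file path is hypothetical and the class is not part of DolphinScheduler).

```java
import java.nio.file.Files;
import java.nio.file.Path;

// Rough Java 11+ equivalent of the script's `kill -0 $(cat pidfile)` liveness probe (illustration only).
public class PidLivenessSketch {

    public static void main(String[] args) throws Exception {
        Path pidFile = Path.of("/tmp/dolphinscheduler-master-server.pid"); // hypothetical location

        if (!Files.exists(pidFile)) {
            System.out.println("no pid file, nothing to stop");
            return;
        }
        long pid = Long.parseLong(Files.readString(pidFile).trim());

        // ProcessHandle.of(pid) is empty when no live process owns that pid,
        // which is what `kill -0` reports through its exit status.
        boolean alive = ProcessHandle.of(pid).map(ProcessHandle::isAlive).orElse(false);
        System.out.println("pid " + pid + (alive ? " is still running" : " is not running"));
    }
}
```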
| https://github.com/apache/dolphinscheduler/issues/2492 | https://github.com/apache/dolphinscheduler/pull/2497 | e837a73fcd823be5f7488d0b36085a044239defe | 21c10f886288ee6667f052eff294da7964de8e64 | "2020-04-22T03:20:31Z" | java | "2020-04-23T09:12:16Z" | script/dolphinscheduler-daemon.sh | #!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
usage="Usage: dolphinscheduler-daemon.sh (start|stop) <command> "
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
startStop=$1
shift
command=$1
shift
echo "Begin $startStop $command......"
BIN_DIR=`dirname $0`
BIN_DIR=`cd "$BIN_DIR"; pwd`
DOLPHINSCHEDULER_HOME=$BIN_DIR/..
source /etc/profile
export JAVA_HOME=$JAVA_HOME
#export JAVA_HOME=/opt/soft/jdk
export HOSTNAME=`hostname`
export DOLPHINSCHEDULER_PID_DIR=/tmp/
export DOLPHINSCHEDULER_LOG_DIR=$DOLPHINSCHEDULER_HOME/logs
export DOLPHINSCHEDULER_CONF_DIR=$DOLPHINSCHEDULER_HOME/conf
export DOLPHINSCHEDULER_LIB_JARS=$DOLPHINSCHEDULER_HOME/lib/*
export DOLPHINSCHEDULER_OPTS="-server -Xmx16g -Xms1g -Xss512k -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+CMSParallelRemarkEnabled -XX:LargePageSizeInBytes=128m -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=70"
export STOP_TIMEOUT=5
if [ ! -d "$DOLPHINSCHEDULER_LOG_DIR" ]; then
mkdir $DOLPHINSCHEDULER_LOG_DIR
fi
log=$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-$command-$HOSTNAME.out
pid=$DOLPHINSCHEDULER_LOG_DIR/dolphinscheduler-$command.pid
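# note: the pid file is kept under the log directory, not under DOLPHINSCHEDULER_PID_DIR declared above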
cd $DOLPHINSCHEDULER_HOME
if [ "$command" = "api-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-api.xml -Dspring.profiles.active=api"
CLASS=org.apache.dolphinscheduler.api.ApiApplicationServer
elif [ "$command" = "master-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-master.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.master.MasterServer
elif [ "$command" = "worker-server" ]; then
LOG_FILE="-Dlogging.config=classpath:logback-worker.xml -Ddruid.mysql.usePingMethod=false"
CLASS=org.apache.dolphinscheduler.server.worker.WorkerServer
elif [ "$command" = "alert-server" ]; then
LOG_FILE="-Dlogback.configurationFile=conf/logback-alert.xml"
CLASS=org.apache.dolphinscheduler.alert.AlertServer
elif [ "$command" = "logger-server" ]; then
CLASS=org.apache.dolphinscheduler.server.log.LoggerServer
else
echo "Error: No command named \`$command' was found."
exit 1
fi
case $startStop in
(start)
[ -w "$DOLPHINSCHEDULER_PID_DIR" ] || mkdir -p "$DOLPHINSCHEDULER_PID_DIR"
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
echo starting $command, logging to $log
exec_command="$LOG_FILE $DOLPHINSCHEDULER_OPTS -classpath $DOLPHINSCHEDULER_CONF_DIR:$DOLPHINSCHEDULER_LIB_JARS $CLASS"
echo "nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &"
nohup $JAVA_HOME/bin/java $exec_command > $log 2>&1 &
echo $! > $pid
;;
(stop)
if [ -f $pid ]; then
TARGET_PID=`cat $pid`
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
kill $TARGET_PID
sleep $STOP_TIMEOUT
if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo "$command did not stop gracefully after $STOP_TIMEOUT seconds: killing with kill -9"
kill -9 $TARGET_PID
fi
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi
;;
(*)
echo $usage
exit 1
;;
esac
echo "End $startStop $command." |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,442 | [BUG] update the directory of resource is error |
Updating the directory occasionally reports that the resource file doesn't exist, even though the directory is actually updated successfully.
![image](https://user-images.githubusercontent.com/55787491/79429815-9feecb80-7ffa-11ea-9c00-41e88eed05e9.png)
![image](https://user-images.githubusercontent.com/55787491/79429960-cd3b7980-7ffa-11ea-958e-32cefebfeffe.png)
![image](https://user-images.githubusercontent.com/55787491/79430009-e17f7680-7ffa-11ea-91ea-87c9d91237b1.png)
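
One way to double-check whether the rename actually landed on HDFS (a rough sketch only; the resource root below is an assumption and depends on the configured base path and the tenant code):

```shell
# replace <basePath>, <tenantCode> and the directory names with your own values
hdfs dfs -ls /<basePath>/<tenantCode>/resources/<oldDirName>
hdfs dfs -ls /<basePath>/<tenantCode>/resources/<newDirName>
```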
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/2442 | https://github.com/apache/dolphinscheduler/pull/2524 | c50ad1240c4789403e9518c855c0217fc9030b5b | e6dec8d2a7a6d39280675d225ec818e0cc7012be | "2020-04-16T08:01:56Z" | java | "2020-04-27T06:16:21Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.text.MessageFormat;
import java.util.*;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
// check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
// check whether the resource already exists
String originFullName = resource.getFullName();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
String nameWithSuffix = name;
String originResourceName = resource.getAlias();
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
// if the new name has no suffix, append the original suffix; otherwise keep the name as-is
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
}
}
// update resource data; collect all children first so their full names can be rewritten after the rename
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name)) {
return result;
}
// get the path of origin file in hdfs
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} else {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
}
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource jar list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// get all resource id of process definitions those is released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
// if the resource type is UDF, check whether it is bound by any UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// check whether preview is supported, based on the file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* updateProcessInstance resource
*
* @param resourceId resource id
* @param content content
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// check whether the file can be edited, based on its suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
 * upload content to hdfs
 *
 * @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// writing the file failed
logger.error("write file {} failed, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.queryDetailsById(userId);
if(user == null){
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
for (int childId : children) {
    childList.add(childId);
    listAllChildren(childId, childList);
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,442 | [BUG] update the directory of resource is error |
Updating the directory occasionally reports that the resource file doesn't exist, even though the directory is actually updated successfully.
![image](https://user-images.githubusercontent.com/55787491/79429815-9feecb80-7ffa-11ea-9c00-41e88eed05e9.png)
![image](https://user-images.githubusercontent.com/55787491/79429960-cd3b7980-7ffa-11ea-958e-32cefebfeffe.png)
![image](https://user-images.githubusercontent.com/55787491/79430009-e17f7680-7ffa-11ea-91ea-87c9d91237b1.png)
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/2442 | https://github.com/apache/dolphinscheduler/pull/2524 | c50ad1240c4789403e9518c855c0217fc9030b5b | e6dec8d2a7a6d39280675d225ec818e0cc7012be | "2020-04-16T08:01:56Z" | java | "2020-04-27T06:16:21Z" | dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.mock.web.MockMultipartFile;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@RunWith(PowerMockRunner.class)
@PowerMockIgnore({"sun.security.*", "javax.net.*"})
@PrepareForTest({HadoopUtils.class,PropertyUtils.class, FileUtils.class,org.apache.dolphinscheduler.api.utils.FileUtils.class})
public class ResourcesServiceTest {
private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceTest.class);
@InjectMocks
private ResourcesService resourcesService;
@Mock
private ResourceMapper resourcesMapper;
@Mock
private TenantMapper tenantMapper;
@Mock
private ResourceUserMapper resourceUserMapper;
@Mock
private HadoopUtils hadoopUtils;
@Mock
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
@Mock
private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
PowerMockito.mockStatic(HadoopUtils.class);
PowerMockito.mockStatic(FileUtils.class);
PowerMockito.mockStatic(org.apache.dolphinscheduler.api.utils.FileUtils.class);
try {
// new HadoopUtils
PowerMockito.whenNew(HadoopUtils.class).withNoArguments().thenReturn(hadoopUtils);
} catch (Exception e) {
e.printStackTrace();
}
PowerMockito.when(HadoopUtils.getInstance()).thenReturn(hadoopUtils);
PowerMockito.mockStatic(PropertyUtils.class);
}
@Test
public void testCreateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_FORBID_CHANGE
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
}
@Test
public void testCreateDirectory(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//PARENT_RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@Test
public void testUpdateResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
Result result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.updateResource(user,0,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//USER_NO_OPERATION_PERM
result = resourcesService.updateResource(user,1,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM.getMsg(),result.getMsg());
//SUCCESS
user.setId(1);
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
e.printStackTrace();
}
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testQueryResourceListPaging(){
User loginUser = new User();
loginUser.setUserType(UserType.ADMIN_USER);
IPage<Resource> resourcePage = new Page<>(1,10);
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
Map<String, Object> result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(pageInfo.getLists()));
}
@Test
public void testQueryResourceList(){
User loginUser = new User();
loginUser.setId(0);
loginUser.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceListAuthored(0, 0,0)).thenReturn(getResourceList());
Map<String, Object> result = resourcesService.queryResourceList(loginUser, ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
List<Resource> resourceList = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resourceList));
}
@Test
public void testDelete(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
// HDFS_NOT_STARTUP
Result result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
// USER_NO_OPERATION_PERM
result = resourcesService.delete(loginUser,2);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//SUCCESS
loginUser.setTenantId(1);
Mockito.when(hadoopUtils.delete(Mockito.anyString(), Mockito.anyBoolean())).thenReturn(true);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
} catch (Exception e) {
logger.error("delete error",e);
Assert.assertTrue(false);
}
}
@Test
public void testVerifyResourceName(){
User user = new User();
user.setId(1);
Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
String unExistFullName = "/test.jar";
try {
Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_FILE_EXIST
user.setTenantId(1);
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
logger.error("hadoop error",e);
}
PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testReadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
//HDFS_NOT_STARTUP
Result result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(2,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode()==result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_FILE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
try {
Mockito.when(hadoopUtils.exists(Mockito.anyString())).thenReturn(false);
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertTrue(Status.RESOURCE_FILE_NOT_EXIST.getCode()==result.getCode());
//SUCCESS
try {
Mockito.when(hadoopUtils.exists(null)).thenReturn(true);
Mockito.when(hadoopUtils.catFile(null,1,10)).thenReturn(getContent());
} catch (IOException e) {
logger.error("hadoop error",e);
}
result = resourcesService.readResource(1,1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testOnlineCreateResource() {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
PowerMockito.when(HadoopUtils.getHdfsResDir("hdfsdDir")).thenReturn("hdfsDir");
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//RuntimeException
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
}
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
}
@Test
public void testUpdateResourceContent(){
User loginUser = new User();
loginUser.setId(0);
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
// HDFS_NOT_STARTUP
Result result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(), result.getMsg());
//RESOURCE_NOT_EXIST
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
result = resourcesService.updateResourceContent(2,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
//USER_NOT_EXIST
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertTrue(Status.TENANT_NOT_EXIST.getCode() == result.getCode());
//SUCCESS
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
result = resourcesService.updateResourceContent(1,"content");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(), result.getMsg());
}
@Test
public void testDownloadResource(){
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
org.springframework.core.io.Resource resourceMock = Mockito.mock(org.springframework.core.io.Resource.class);
try {
//resource null
org.springframework.core.io.Resource resource = resourcesService.downloadResource(1);
Assert.assertNull(resource);
Mockito.when(resourcesMapper.selectById(1)).thenReturn(getResource());
PowerMockito.when(org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(Mockito.any())).thenReturn(resourceMock);
resource = resourcesService.downloadResource(1);
Assert.assertNotNull(resource);
} catch (Exception e) {
logger.error("DownloadResource error",e);
Assert.assertTrue(false);
}
}
@Test
public void testUnauthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryResourceExceptUserId(1)).thenReturn(getResourceList());
result = resourcesService.unauthorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
@Test
public void testUnauthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryUdfFuncExceptUserId(1)).thenReturn(getUdfFuncList());
result = resourcesService.unauthorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedUDFFunction(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(udfFunctionMapper.queryAuthedUdfFunc(1)).thenReturn(getUdfFuncList());
result = resourcesService.authorizedUDFFunction(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<UdfFunc> udfFuncs = (List<UdfFunc>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(udfFuncs));
}
@Test
public void testAuthorizedFile(){
User user = getUser();
//USER_NO_OPERATION_PERM
Map<String, Object> result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.USER_NO_OPERATION_PERM,result.get(Constants.STATUS));
//SUCCESS
user.setUserType(UserType.ADMIN_USER);
Mockito.when(resourcesMapper.queryAuthorizedResourceList(1)).thenReturn(getResourceList());
result = resourcesService.authorizedFile(user,1);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS,result.get(Constants.STATUS));
List<Resource> resources = (List<Resource>) result.get(Constants.DATA_LIST);
Assert.assertTrue(CollectionUtils.isNotEmpty(resources));
}
private List<Resource> getResourceList(){
List<Resource> resources = new ArrayList<>();
resources.add(getResource());
return resources;
}
private Tenant getTenant() {
Tenant tenant = new Tenant();
tenant.setTenantCode("123");
return tenant;
}
private Resource getResource(){
Resource resource = new Resource();
resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
private Resource getUdfResource(){
Resource resource = new Resource();
resource.setUserId(1);
resource.setDescription("udfTest");
resource.setAlias("udfTest.jar");
resource.setFullName("/udfTest.jar");
resource.setType(ResourceType.UDF);
return resource;
}
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();
udfFunc.setId(1);
return udfFunc;
}
private List<UdfFunc> getUdfFuncList(){
List<UdfFunc> udfFuncs = new ArrayList<>();
udfFuncs.add(getUdfFunc());
return udfFuncs;
}
private User getUser(){
User user = new User();
user.setId(1);
user.setTenantId(1);
return user;
}
private List<String> getContent(){
List<String> contentList = new ArrayList<>();
contentList.add("test");
return contentList;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | .github/workflows/ci_backend.yml | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: Backend
on:
push:
paths:
- '.github/workflows/ci_backend.yml'
- 'package.xml'
- 'pom.xml'
- 'dolphinscheduler-alert/**'
- 'dolphinscheduler-api/**'
- 'dolphinscheduler-common/**'
- 'dolphinscheduler-dao/**'
- 'dolphinscheduler-rpc/**'
- 'dolphinscheduler-server/**'
pull_request:
paths:
- '.github/workflows/ci_backend.yml'
- 'package.xml'
- 'pom.xml'
- 'dolphinscheduler-alert/**'
- 'dolphinscheduler-api/**'
- 'dolphinscheduler-common/**'
- 'dolphinscheduler-dao/**'
- 'dolphinscheduler-rpc/**'
- 'dolphinscheduler-server/**'
jobs:
Compile-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
- name: checkout submodules
shell: bash
run: |
git submodule sync --recursive
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Compile
run: mvn -B clean compile package -Prelease -Dmaven.test.skip=true
License-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# In the checkout@v2, it doesn't support git submodule. Execute the commands manually.
- name: checkout submodules
shell: bash
run: |
git submodule sync --recursive
git -c protocol.version=2 submodule update --init --force --recursive --depth=1
- name: Set up JDK 1.8
uses: actions/setup-java@v1
with:
java-version: 1.8
- name: Check
run: mvn -B apache-rat:check
|
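The issue above asks for a SkyWalking-style dependency license check in CI, in addition to the apache-rat header check already wired into ci_backend.yml. The script below is only a minimal illustrative sketch of that idea, loosely modelled on SkyWalking's tools/dependencies/check-LICENSE.sh; the known-dependencies.txt path, the mvn dependency:list invocation, and the output-parsing regex are assumptions for the example, not DolphinScheduler's actual tooling.

#!/usr/bin/env python3
"""Hypothetical dependency-license check sketch (not DolphinScheduler's real tooling).

Compares the artifacts Maven resolves against a hand-maintained list of
dependencies already covered in the distribution LICENSE bookkeeping.
"""
import re
import subprocess
import sys

# Assumed path; SkyWalking keeps a similar file under tools/dependencies/.
KNOWN_DEPS_FILE = "tools/dependencies/known-dependencies.txt"


def resolved_artifacts() -> set:
    """Collect groupId:artifactId:version triples that Maven actually resolves."""
    out = subprocess.run(
        ["mvn", "-B", "dependency:list", "-DincludeScope=runtime"],
        capture_output=True, text=True, check=True,
    ).stdout
    # dependency:list lines look like: [INFO]    org.apache.commons:commons-lang3:jar:3.5:compile
    pattern = re.compile(r"^\[INFO\]\s+([\w.\-]+):([\w.\-]+):jar:([\w.\-]+):\w+", re.MULTILINE)
    return {f"{g}:{a}:{v}" for g, a, v in pattern.findall(out)}


def known_artifacts() -> set:
    """Read the declared dependency list, ignoring blanks and comments."""
    with open(KNOWN_DEPS_FILE, encoding="utf-8") as fh:
        return {line.strip() for line in fh if line.strip() and not line.startswith("#")}


def main() -> int:
    resolved, known = resolved_artifacts(), known_artifacts()
    missing = sorted(resolved - known)  # shipped but not declared in the LICENSE bookkeeping
    stale = sorted(known - resolved)    # declared but no longer shipped
    for name in missing:
        print(f"UNDECLARED DEPENDENCY: {name}")
    for name in stale:
        print(f"STALE LICENSE ENTRY:   {name}")
    return 1 if missing or stale else 0


if __name__ == "__main__":
    sys.exit(main())

If a project took this route, a script like this could run as one more step in the License-check job above, after the apache-rat step, failing the build whenever the resolved dependency set and the LICENSE bookkeeping drift apart.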
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | .gitignore | .git
.svn
.hg
.zip
.gz
.DS_Store
.idea
.idea/
.idea/*
.target
.target/
**/**/target/**
target/*
*/target
*/target/*
.settings
.nbproject
.classpath
.project
*.iml
*.ipr
*.iws
*.tgz
.*.swp
.vim
.tmp
node_modules
npm-debug.log
.vscode
logs/*
.www
t.*
yarn.lock
package-lock.json
config.gypi
test/coverage
/docs/zh_CN/介绍
/docs/zh_CN/贡献代码.md
/dolphinscheduler-common/src/main/resources/zookeeper.properties
dolphinscheduler-alert/logs/
dolphinscheduler-alert/src/main/resources/alert.properties_bak
dolphinscheduler-alert/src/main/resources/logback.xml
dolphinscheduler-server/src/main/resources/logback.xml
dolphinscheduler-ui/dist
dolphinscheduler-ui/node
dolphinscheduler-ui/dist/css/common.16ac5d9.css
dolphinscheduler-ui/dist/css/home/index.b444b91.css
dolphinscheduler-ui/dist/css/login/index.5866c64.css
dolphinscheduler-ui/dist/js/0.ac94e5d.js
dolphinscheduler-ui/dist/js/0.ac94e5d.js.map
dolphinscheduler-ui/dist/js/1.0b043a3.js
dolphinscheduler-ui/dist/js/1.0b043a3.js.map
dolphinscheduler-ui/dist/js/10.1bce3dc.js
dolphinscheduler-ui/dist/js/10.1bce3dc.js.map
dolphinscheduler-ui/dist/js/11.79f04d8.js
dolphinscheduler-ui/dist/js/11.79f04d8.js.map
dolphinscheduler-ui/dist/js/12.420daa5.js
dolphinscheduler-ui/dist/js/12.420daa5.js.map
dolphinscheduler-ui/dist/js/13.e5bae1c.js
dolphinscheduler-ui/dist/js/13.e5bae1c.js.map
dolphinscheduler-ui/dist/js/14.f2a0dca.js
dolphinscheduler-ui/dist/js/14.f2a0dca.js.map
dolphinscheduler-ui/dist/js/15.45373e8.js
dolphinscheduler-ui/dist/js/15.45373e8.js.map
dolphinscheduler-ui/dist/js/16.fecb0fc.js
dolphinscheduler-ui/dist/js/16.fecb0fc.js.map
dolphinscheduler-ui/dist/js/17.84be279.js
dolphinscheduler-ui/dist/js/17.84be279.js.map
dolphinscheduler-ui/dist/js/18.307ea70.js
dolphinscheduler-ui/dist/js/18.307ea70.js.map
dolphinscheduler-ui/dist/js/19.144db9c.js
dolphinscheduler-ui/dist/js/19.144db9c.js.map
dolphinscheduler-ui/dist/js/2.8b4ef29.js
dolphinscheduler-ui/dist/js/2.8b4ef29.js.map
dolphinscheduler-ui/dist/js/20.4c527e9.js
dolphinscheduler-ui/dist/js/20.4c527e9.js.map
dolphinscheduler-ui/dist/js/21.831b2a2.js
dolphinscheduler-ui/dist/js/21.831b2a2.js.map
dolphinscheduler-ui/dist/js/22.2b4bb2a.js
dolphinscheduler-ui/dist/js/22.2b4bb2a.js.map
dolphinscheduler-ui/dist/js/23.81467ef.js
dolphinscheduler-ui/dist/js/23.81467ef.js.map
dolphinscheduler-ui/dist/js/24.54a00e4.js
dolphinscheduler-ui/dist/js/24.54a00e4.js.map
dolphinscheduler-ui/dist/js/25.8d7bd36.js
dolphinscheduler-ui/dist/js/25.8d7bd36.js.map
dolphinscheduler-ui/dist/js/26.2ec5e78.js
dolphinscheduler-ui/dist/js/26.2ec5e78.js.map
dolphinscheduler-ui/dist/js/27.3ab48c2.js
dolphinscheduler-ui/dist/js/27.3ab48c2.js.map
dolphinscheduler-ui/dist/js/28.363088a.js
dolphinscheduler-ui/dist/js/28.363088a.js.map
dolphinscheduler-ui/dist/js/29.6c5853a.js
dolphinscheduler-ui/dist/js/29.6c5853a.js.map
dolphinscheduler-ui/dist/js/3.a0edb5b.js
dolphinscheduler-ui/dist/js/3.a0edb5b.js.map
dolphinscheduler-ui/dist/js/30.940fdd3.js
dolphinscheduler-ui/dist/js/30.940fdd3.js.map
dolphinscheduler-ui/dist/js/31.168a460.js
dolphinscheduler-ui/dist/js/31.168a460.js.map
dolphinscheduler-ui/dist/js/32.8df6594.js
dolphinscheduler-ui/dist/js/32.8df6594.js.map
dolphinscheduler-ui/dist/js/33.4480bbe.js
dolphinscheduler-ui/dist/js/33.4480bbe.js.map
dolphinscheduler-ui/dist/js/34.b407fe1.js
dolphinscheduler-ui/dist/js/34.b407fe1.js.map
dolphinscheduler-ui/dist/js/35.f340b0a.js
dolphinscheduler-ui/dist/js/35.f340b0a.js.map
dolphinscheduler-ui/dist/js/36.8880c2d.js
dolphinscheduler-ui/dist/js/36.8880c2d.js.map
dolphinscheduler-ui/dist/js/37.ea2a25d.js
dolphinscheduler-ui/dist/js/37.ea2a25d.js.map
dolphinscheduler-ui/dist/js/38.98a59ee.js
dolphinscheduler-ui/dist/js/38.98a59ee.js.map
dolphinscheduler-ui/dist/js/39.a5e958a.js
dolphinscheduler-ui/dist/js/39.a5e958a.js.map
dolphinscheduler-ui/dist/js/4.4ca44db.js
dolphinscheduler-ui/dist/js/4.4ca44db.js.map
dolphinscheduler-ui/dist/js/40.e187b1e.js
dolphinscheduler-ui/dist/js/40.e187b1e.js.map
dolphinscheduler-ui/dist/js/41.0e89182.js
dolphinscheduler-ui/dist/js/41.0e89182.js.map
dolphinscheduler-ui/dist/js/42.341047c.js
dolphinscheduler-ui/dist/js/42.341047c.js.map
dolphinscheduler-ui/dist/js/43.27b8228.js
dolphinscheduler-ui/dist/js/43.27b8228.js.map
dolphinscheduler-ui/dist/js/44.e8869bc.js
dolphinscheduler-ui/dist/js/44.e8869bc.js.map
dolphinscheduler-ui/dist/js/45.8d54901.js
dolphinscheduler-ui/dist/js/45.8d54901.js.map
dolphinscheduler-ui/dist/js/5.e1ed7f3.js
dolphinscheduler-ui/dist/js/5.e1ed7f3.js.map
dolphinscheduler-ui/dist/js/6.241ba07.js
dolphinscheduler-ui/dist/js/6.241ba07.js.map
dolphinscheduler-ui/dist/js/7.ab2e297.js
dolphinscheduler-ui/dist/js/7.ab2e297.js.map
dolphinscheduler-ui/dist/js/8.83ff814.js
dolphinscheduler-ui/dist/js/8.83ff814.js.map
dolphinscheduler-ui/dist/js/9.39cb29f.js
dolphinscheduler-ui/dist/js/9.39cb29f.js.map
dolphinscheduler-ui/dist/js/common.733e342.js
dolphinscheduler-ui/dist/js/common.733e342.js.map
dolphinscheduler-ui/dist/js/home/index.78a5d12.js
dolphinscheduler-ui/dist/js/home/index.78a5d12.js.map
dolphinscheduler-ui/dist/js/login/index.291b8e3.js
dolphinscheduler-ui/dist/js/login/index.291b8e3.js.map
dolphinscheduler-ui/dist/lib/external/
/dolphinscheduler-dao/src/main/resources/dao/data_source.properties
!/zookeeper_data/
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | dolphinscheduler-dist/release-docs/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=======================================================================
Apache DolphinScheduler Subcomponents:
The Apache DolphinScheduler project contains subcomponents with separate copyright
notices and license terms. Your use of the source code for the these
subcomponents is subject to the terms and conditions of the following
licenses.
========================================================================
Apache 2.0 licenses
========================================================================
The following components are provided under the Apache License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
apacheds-i18n 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-i18n/2.0.0-M15, Apache 2.0
apacheds-kerberos-codec 2.0.0-M15: https://mvnrepository.com/artifact/org.apache.directory.server/apacheds-kerberos-codec/2.0.0-M15, Apache 2.0
apache-el 8.5.35.1: https://mvnrepository.com/artifact/org.mortbay.jasper/apache-el/8.5.35.1, Apache 2.0
api-asn1-api 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-asn1-api/1.0.0-M20, Apache 2.0
api-util 1.0.0-M20: https://mvnrepository.com/artifact/org.apache.directory.api/api-util/1.0.0-M20, Apache 2.0
audience-annotations 0.5.0: https://mvnrepository.com/artifact/org.apache.yetus/audience-annotations/0.5.0, Apache 2.0
avro 1.7.4: https://github.com/apache/avro, Apache 2.0
aws-sdk-java 1.7.4: https://mvnrepository.com/artifact/com.amazonaws/aws-java-sdk/1.7.4, Apache 2.0
bonecp 0.8.0.RELEASE: https://github.com/wwadge/bonecp, Apache 2.0
byte-buddy 1.9.10: https://mvnrepository.com/artifact/net.bytebuddy/byte-buddy/1.9.10, Apache 2.0
classmate 1.4.0: https://mvnrepository.com/artifact/com.fasterxml/classmate/1.4.0, Apache 2.0
clickhouse-jdbc 0.1.52: https://mvnrepository.com/artifact/ru.yandex.clickhouse/clickhouse-jdbc/0.1.52, Apache 2.0
commons-cli 1.2: https://mvnrepository.com/artifact/commons-cli/commons-cli/1.2, Apache 2.0
commons-codec 1.6: https://mvnrepository.com/artifact/commons-codec/commons-codec/1.6, Apache 2.0
commons-collections 3.2.2: https://mvnrepository.com/artifact/commons-collections/commons-collections/3.2.2, Apache 2.0
commons-collections4 4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-collections4/4.1, Apache 2.0
commons-compress 1.4.1: https://mvnrepository.com/artifact/org.apache.commons/commons-compress/1.4.1, Apache 2.0
commons-configuration 1.10: https://mvnrepository.com/artifact/commons-configuration/commons-configuration/1.10, Apache 2.0
commons-daemon 1.0.13 https://mvnrepository.com/artifact/commons-daemon/commons-daemon/1.0.13, Apache 2.0
commons-dbcp 1.4: https://github.com/apache/commons-dbcp, Apache 2.0
commons-el 1.0: https://mvnrepository.com/artifact/commons-el/commons-el/1.0, Apache 2.0
commons-email 1.5: https://github.com/apache/commons-email, Apache 2.0
commons-httpclient 3.0.1: https://mvnrepository.com/artifact/commons-httpclient/commons-httpclient/3.0.1, Apache 2.0
commons-io 2.4: https://github.com/apache/commons-io, Apache 2.0
commons-lang 2.3: https://github.com/apache/commons-lang, Apache 2.0
commons-lang3 3.5: https://mvnrepository.com/artifact/org.apache.commons/commons-lang3/3.5, Apache 2.0
commons-logging 1.1.1: https://github.com/apache/commons-logging, Apache 2.0
commons-math3 3.1.1: https://github.com/apache/commons-math, Apache 2.0
commons-net 3.1: https://github.com/apache/commons-net, Apache 2.0
commons-pool 1.6: https://github.com/apache/commons-pool, Apache 2.0
cron-utils 5.0.5: https://mvnrepository.com/artifact/com.cronutils/cron-utils/5.0.5, Apache 2.0
curator-client 2.12.0: https://mvnrepository.com/artifact/org.apache.curator/curator-client/2.12.0, Apache 2.0
curator-framework 2.12.0: https://mvnrepository.com/artifact/org.apache.curator/curator-framework/2.12.0, Apache 2.0
curator-recipes 2.12.0: https://mvnrepository.com/artifact/org.apache.curator/curator-recipes/2.12.0, Apache 2.0
datanucleus-api-jdo 4.2.1: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-api-jdo/4.2.1, Apache 2.0
datanucleus-core 4.1.6: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-core/4.1.6, Apache 2.0
datanucleus-rdbms 4.1.7: https://mvnrepository.com/artifact/org.datanucleus/datanucleus-rdbms/4.1.7, Apache 2.0
derby 10.14.2.0: https://github.com/apache/derby, Apache 2.0
druid 1.1.14: https://mvnrepository.com/artifact/com.alibaba/druid/1.1.14, Apache 2.0
error_prone_annotations 2.1.2: https://mvnrepository.com/artifact/com.google.errorprone/error_prone_annotations/2.1.2, Apache 2.0
fastjson 1.2.61: https://mvnrepository.com/artifact/com.alibaba/fastjson/1.2.61, Apache 2.0
freemarker 2.3.21: https://github.com/apache/freemarker, Apache 2.0
grpc-context 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-context/1.9.0, Apache 2.0
grpc-core 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-core/1.9.0, Apache 2.0
grpc-netty 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-netty/1.9.0, Apache 2.0
grpc-protobuf 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-protobuf/1.9.0, Apache 2.0
grpc-protobuf-lite 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-protobuf-lite/1.9.0, Apache 2.0
grpc-stub 1.9.0: https://mvnrepository.com/artifact/io.grpc/grpc-stub/1.9.0, Apache 2.0
gson 2.8.5: https://github.com/google/gson, Apache 2.0
guava 20.0: https://mvnrepository.com/artifact/com.google.guava/guava/20.0, Apache 2.0
guice 3.0: https://mvnrepository.com/artifact/com.google.inject/guice/3.0, Apache 2.0
guice-servlet 3.0: https://mvnrepository.com/artifact/com.google.inject.extensions/guice-servlet/3.0, Apache 2.0
hadoop-annotations 2.7.3:https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-annotations/2.7.3, Apache 2.0
hadoop-auth 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-auth/2.7.3, Apache 2.0
hadoop-aws 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-aws/2.7.3, Apache 2.0
hadoop-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client/2.7.3, Apache 2.0
hadoop-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common/2.7.3, Apache 2.0
hadoop-hdfs 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-hdfs/2.7.3, Apache 2.0
hadoop-mapreduce-client-app 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-app/2.7.3, Apache 2.0
hadoop-mapreduce-client-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-common/2.7.3, Apache 2.0
hadoop-mapreduce-client-core 2.7.3: https://mvnrepository.com/artifact/io.hops/hadoop-mapreduce-client-core/2.7.3, Apache 2.0
hadoop-mapreduce-client-jobclient 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-jobclient/2.7.3, Apache 2.0
hadoop-mapreduce-client-shuffle 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-mapreduce-client-shuffle/2.7.3, Apache 2.0
hadoop-yarn-api 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-api/2.7.3, Apache 2.0
hadoop-yarn-client 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-client/2.7.3, Apache 2.0
hadoop-yarn-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-common/2.7.3, Apache 2.0
hadoop-yarn-server-common 2.7.3: https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-yarn-server-common/2.7.3, Apache 2.0
hibernate-validator 6.0.14.Final: https://github.com/hibernate/hibernate-validator, Apache 2.0
HikariCP 3.2.0: https://mvnrepository.com/artifact/com.zaxxer/HikariCP/3.2.0, Apache 2.0
hive-common 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-common/2.1.0, Apache 2.0
hive-jdbc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc/2.1.0, Apache 2.0
hive-metastore 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-metastore/2.1.0, Apache 2.0
hive-orc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-orc/2.1.0, Apache 2.0
hive-serde 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-serde/2.1.0, Apache 2.0
hive-service 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service/2.1.0, Apache 2.0
hive-service-rpc 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-service-rpc/2.1.0, Apache 2.0
hive-storage-api 2.1.0: https://mvnrepository.com/artifact/org.apache.hive/hive-storage-api/2.1.0, Apache 2.0
htrace-core 3.1.0-incubating: https://mvnrepository.com/artifact/org.apache.htrace/htrace-core/3.1.0-incubating, Apache 2.0
httpclient 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpclient/4.4.1, Apache 2.0
httpcore 4.4.1: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpcore/4.4.1, Apache 2.0
httpmime 4.5.7: https://mvnrepository.com/artifact/org.apache.httpcomponents/httpmime/4.5.7, Apache 2.0
instrumentation-api 0.4.3: https://mvnrepository.com/artifact/com.google.instrumentation/instrumentation-api/0.4.3, Apache 2.0
jackson-annotations 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.core/jackson-annotations/2.9.8, Apache 2.0
jackson-core 2.9.8: https://github.com/FasterXML/jackson-core, Apache 2.0
jackson-core-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-core-asl/1.9.13, Apache 2.0
jackson-databind 2.9.8: https://github.com/FasterXML/jackson-databind, Apache 2.0
jackson-datatype-jdk8 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jdk8/2.9.8, Apache 2.0
jackson-datatype-jsr310 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.datatype/jackson-datatype-jsr310/2.9.8, Apache 2.0
jackson-jaxrs 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-jaxrs/1.9.13, Apache 2.0 and LGPL 2.1
jackson-mapper-asl 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-mapper-asl/1.9.13, Apache 2.0
jackson-module-parameter-names 2.9.8: https://mvnrepository.com/artifact/com.fasterxml.jackson.module/jackson-module-parameter-names/2.9.8, Apache 2.0
jackson-xc 1.9.13: https://mvnrepository.com/artifact/org.codehaus.jackson/jackson-xc/1.9.13, Apache 2.0 and LGPL 2.1
jasper-compiler 5.5.23: https://mvnrepository.com/artifact/tomcat/jasper-compiler/5.5.23, Apache 2.0
jasper-runtime 5.5.23: https://mvnrepository.com/artifact/tomcat/jasper-runtime/5.5.23, Apache 2.0
javax.inject 1: https://mvnrepository.com/artifact/javax.inject/javax.inject/1, Apache 2.0
javax.jdo-3.2.0-m3: https://mvnrepository.com/artifact/org.datanucleus/javax.jdo/3.2.0-m3, Apache 2.0
java-xmlbuilder 0.4 : https://mvnrepository.com/artifact/com.jamesmurty.utils/java-xmlbuilder/0.4, Apache 2.0
jboss-logging 3.3.2.Final: https://mvnrepository.com/artifact/org.jboss.logging/jboss-logging/3.3.2.Final, Apache 2.0
jdo-api 3.0.1: https://mvnrepository.com/artifact/javax.jdo/jdo-api/3.0.1, Apache 2.0
jets3t 0.9.0: https://mvnrepository.com/artifact/net.java.dev.jets3t/jets3t/0.9.0, Apache 2.0
jettison 1.1: https://github.com/jettison-json/jettison, Apache 2.0
jetty 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty/6.1.26, Apache 2.0 and EPL 1.0
jetty-continuation 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-continuation/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-http 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-http/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-io 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-io/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-security 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-security/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-server 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-server/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-servlet 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlet/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-servlets 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-servlets/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-util 6.1.26: https://mvnrepository.com/artifact/org.mortbay.jetty/jetty-util/6.1.26, Apache 2.0 and EPL 1.0
jetty-util 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-util/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-webapp 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-webapp/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jetty-xml 9.4.14.v20181114: https://mvnrepository.com/artifact/org.eclipse.jetty/jetty-xml/9.4.14.v20181114, Apache 2.0 and EPL 1.0
jna 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna/4.5.2, Apache 2.0 and LGPL 2.1
jna-platform 4.5.2: https://mvnrepository.com/artifact/net.java.dev.jna/jna-platform/4.5.2, Apache 2.0 and LGPL 2.1
joda-time 2.10.1: https://github.com/JodaOrg/joda-time, Apache 2.0
jpam 1.1: https://mvnrepository.com/artifact/net.sf.jpam/jpam/1.1, Apache 2.0
jsqlparser 2.1: https://github.com/JSQLParser/JSqlParser, Apache 2.0 or LGPL 2.1
jsr305 3.0.0: https://mvnrepository.com/artifact/com.google.code.findbugs/jsr305, Apache 2.0
libfb303 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libfb303/0.9.3, Apache 2.0
libthrift 0.9.3: https://mvnrepository.com/artifact/org.apache.thrift/libthrift/0.9.3, Apache 2.0
log4j 1.2.17: https://mvnrepository.com/artifact/log4j/log4j/1.2.17, Apache 2.0
log4j-1.2-api 2.11.2: https://mvnrepository.com/artifact/org.apache.logging.log4j/log4j-1.2-api/2.11.2, Apache 2.0
lz4 1.3.0: https://mvnrepository.com/artifact/net.jpountz.lz4/lz4/1.3.0, Apache 2.0
mapstruct 1.2.0.Final: https://github.com/mapstruct/mapstruct, Apache 2.0
mybatis 3.5.2 https://mvnrepository.com/artifact/org.mybatis/mybatis/3.5.2, Apache 2.0
mybatis-plus 3.2.0: https://github.com/baomidou/mybatis-plus, Apache 2.0
mybatis-plus-annotation 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-annotation/3.2.0, Apache 2.0
mybatis-plus-boot-starter 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-boot-starter/3.2.0, Apache 2.0
mybatis-plus-core 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-core/3.2.0, Apache 2.0
mybatis-plus-extension 3.2.0: https://mvnrepository.com/artifact/com.baomidou/mybatis-plus-extension/3.2.0, Apache 2.0
mybatis-spring 2.0.2: https://mvnrepository.com/artifact/org.mybatis/mybatis-spring/2.0.2, Apache 2.0
netty 3.6.2.Final: https://github.com/netty/netty, Apache 2.0
netty-all 4.1.33.Final: https://github.com/netty/netty/blob/netty-4.1.33.Final/LICENSE.txt, Apache 2.0
netty-buffer 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-buffer/4.1.33.Final, Apache 2.0
netty-codec 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-codec/4.1.33.Final, Apache 2.0
netty-codec-http2 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-codec-http2/4.1.33.Final, Apache 2.0
netty-codec-http 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-codec-http/4.1.33.Final, Apache 2.0
netty-codec-socks 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-codec-socks/4.1.33.Final, Apache 2.0
netty-common 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-common/4.1.33.Final, Apache 2.0
netty-handler 4.1.33: https://mvnrepository.com/artifact/io.netty/netty-handler/4.1.33.Final, Apache 2.0
netty-handler-proxy 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-handler-proxy/4.1.33.Final, Apache 2.0
netty-resolver 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-resolver/4.1.33.Final, Apache 2.0
netty-transport 4.1.33.Final: https://mvnrepository.com/artifact/io.netty/netty-transport/4.1.33.Final, Apache 2.0
opencensus-api 0.10.0: https://mvnrepository.com/artifact/io.opencensus/opencensus-api/0.10.0, Apache 2.0
opencensus-contrib-grpc-metrics 0.10.0: https://mvnrepository.com/artifact/io.opencensus/opencensus-contrib-grpc-metrics/0.10.0, Apache 2.0
opencsv 2.3: https://mvnrepository.com/artifact/net.sf.opencsv/opencsv/2.3, Apache 2.0
parquet-hadoop-bundle 1.8.1: https://mvnrepository.com/artifact/org.apache.parquet/parquet-hadoop-bundle/1.8.1, Apache 2.0
poi 3.17: https://mvnrepository.com/artifact/org.apache.poi/poi/3.17, Apache 2.0
proto-google-common-protos 1.0.0: https://mvnrepository.com/artifact/com.google.api.grpc/proto-google-common-protos/1.0.0, Apache 2.0
quartz 2.2.3: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz/2.2.3, Apache 2.0
quartz-jobs 2.2.3: https://mvnrepository.com/artifact/org.quartz-scheduler/quartz-jobs/2.2.3, Apache 2.0
snakeyaml 1.23: https://mvnrepository.com/artifact/org.yaml/snakeyaml/1.23, Apache 2.0
snappy 0.2: https://mvnrepository.com/artifact/org.iq80.snappy/snappy/0.2, Apache 2.0
snappy-java 1.0.4.1: https://github.com/xerial/snappy-java, Apache 2.0
spring-aop 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-aop/5.1.5.RELEASE, Apache 2.0
spring-beans 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-beans/5.1.5.RELEASE, Apache 2.0
spring-boot 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot/2.1.3.RELEASE, Apache 2.0
spring-boot-autoconfigure 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-autoconfigure/2.1.3.RELEASE, Apache 2.0
spring-boot-starter 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-aop 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-aop/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-jdbc 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jdbc/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-jetty 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-jetty/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-json 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-json/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-logging 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-logging/2.1.3.RELEASE, Apache 2.0
spring-boot-starter-web 2.1.3.RELEASE: https://mvnrepository.com/artifact/org.springframework.boot/spring-boot-starter-web/2.1.3.RELEASE, Apache 2.0
spring-context 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-context/5.1.5.RELEASE, Apache 2.0
spring-core 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-core, Apache 2.0
spring-expression 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-expression, Apache 2.0
springfox-core 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-core, Apache 2.0
springfox-schema 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-schema, Apache 2.0
springfox-spi 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spi, Apache 2.0
springfox-spring-web 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-spring-web/2.9.2, Apache 2.0
springfox-swagger2 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger2/2.9.2, Apache 2.0
springfox-swagger-common 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-common/2.9.2, Apache 2.0
springfox-swagger-ui 2.9.2: https://mvnrepository.com/artifact/io.springfox/springfox-swagger-ui/2.9.2, Apache 2.0
spring-jcl 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-jcl/5.1.5.RELEASE, Apache 2.0
spring-jdbc 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-jdbc/5.1.5.RELEASE, Apache 2.0
spring-plugin-core 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-core/1.2.0.RELEASE, Apache 2.0
spring-plugin-metadata 1.2.0.RELEASE: https://mvnrepository.com/artifact/org.springframework.plugin/spring-plugin-metadata/1.2.0.RELEASE, Apache 2.0
spring-tx 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-tx/5.1.5.RELEASE, Apache 2.0
spring-web 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-web/5.1.5.RELEASE, Apache 2.0
spring-webmvc 5.1.5.RELEASE: https://mvnrepository.com/artifact/org.springframework/spring-webmvc/5.1.5.RELEASE, Apache 2.0
swagger-annotations 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-annotations/1.5.20, Apache 2.0
swagger-bootstrap-ui 1.9.3: https://mvnrepository.com/artifact/com.github.xiaoymin/swagger-bootstrap-ui/1.9.3, Apache 2.0
swagger-models 1.5.20: https://mvnrepository.com/artifact/io.swagger/swagger-models/1.5.20, Apache 2.0
tephra-api 0.6.0: https://mvnrepository.com/artifact/co.cask.tephra/tephra-api/0.6.0, Apache 2.0
validation-api 2.0.1.Final: https://mvnrepository.com/artifact/javax.validation/validation-api/2.0.1.Final, Apache 2.0
xercesImpl 2.9.1: https://mvnrepository.com/artifact/xerces/xercesImpl/2.9.1, Apache 2.0
xml-apis 1.4.01: https://mvnrepository.com/artifact/xml-apis/xml-apis/1.4.01, Apache 2.0 and W3C
zookeeper 3.4.14: https://mvnrepository.com/artifact/org.apache.zookeeper/zookeeper/3.4.14, Apache 2.0
========================================================================
BSD licenses
========================================================================
The following components are provided under a BSD license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
asm 3.1: https://github.com/jdf/javalin/tree/master/lib/asm-3.1, BSD
javolution 5.5.1: https://mvnrepository.com/artifact/javolution/javolution/5.5.1, BSD
jline 0.9.94: https://github.com/jline/jline3, BSD
jsch 0.1.42: https://mvnrepository.com/artifact/com.jcraft/jsch/0.1.42, BSD
leveldbjni-all 1.8: https://github.com/fusesource/leveldbjni, BSD-3-Clause
postgresql 42.1.4: https://mvnrepository.com/artifact/org.postgresql/postgresql/42.1.4, BSD 2-clause
protobuf-java 3.5.1: https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java/3.5.1, BSD 3-clause
protobuf-java-util 3.5.1: https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java-util, BSD 3-clause
paranamer 2.3: https://mvnrepository.com/artifact/com.thoughtworks.paranamer/paranamer/2.3, BSD
threetenbp 1.3.6: https://mvnrepository.com/artifact/org.threeten/threetenbp/1.3.6, BSD 3-clause
xmlenc 0.52: https://mvnrepository.com/artifact/xmlenc/xmlenc/0.52, BSD
hamcrest-core 1.3: https://mvnrepository.com/artifact/org.hamcrest/hamcrest-core/1.3, BSD 2-Clause
========================================================================
CDDL licenses
========================================================================
The following components are provided under the CDDL License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
activation 1.1: https://mvnrepository.com/artifact/javax.activation/activation/1.1 CDDL 1.0
javax.activation-api 1.2.0: https://mvnrepository.com/artifact/javax.activation/javax.activation-api/1.2.0, CDDL and LGPL 2.0
javax.annotation-api 1.3.2: https://mvnrepository.com/artifact/javax.annotation/javax.annotation-api/1.3.2, CDDL + GPLv2
javax.mail 1.6.2: https://mvnrepository.com/artifact/com.sun.mail/javax.mail/1.6.2, CDDL/GPLv2
javax.servlet-api 3.1.0: https://mvnrepository.com/artifact/javax.servlet/javax.servlet-api/3.1.0, CDDL + GPLv2
jaxb-api 2.3.1: https://mvnrepository.com/artifact/javax.xml.bind/jaxb-api/2.3.1, CDDL 1.1
jaxb-impl 2.2.3-1: https://mvnrepository.com/artifact/com.sun.xml.bind/jaxb-impl/2.2.3-1, CDDL and GPL 1.1
jersey-client 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-client/1.9, CDDL 1.1 and GPL 1.1
jersey-core 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-core/1.9, CDDL 1.1 and GPL 1.1
jersey-guice 1.9: https://mvnrepository.com/artifact/com.sun.jersey.contribs/jersey-guice/1.9, CDDL 1.1 and GPL 1.1
jersey-json 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-json/1.9, CDDL 1.1 and GPL 1.1
jersey-server 1.9: https://mvnrepository.com/artifact/com.sun.jersey/jersey-server/1.9, CDDL 1.1 and GPL 1.1
jsp-api 2.1: https://mvnrepository.com/artifact/javax.servlet.jsp/jsp-api/2.1, CDDL and GPL 2.0
jta 1.1: https://mvnrepository.com/artifact/javax.transaction/jta/1.1, CDDL 1.0
transaction-api 1.1: https://mvnrepository.com/artifact/javax.transaction/transaction-api/1.1, CDDL 1.0
========================================================================
EPL licenses
========================================================================
The following components are provided under the EPL License. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
aspectjweaver 1.9.2:https://mvnrepository.com/artifact/org.aspectj/aspectjweaver/1.9.2, EPL 1.0
logback-classic 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-classic/1.2.3, EPL 1.0 and LGPL 2.1
logback-core 1.2.3: https://mvnrepository.com/artifact/ch.qos.logback/logback-core/1.2.3, EPL 1.0 and LGPL 2.1
oshi-core 3.5.0: https://mvnrepository.com/artifact/com.github.oshi/oshi-core/3.5.0, EPL 1.0
junit 4.12: https://mvnrepository.com/artifact/junit/junit/4.12, EPL 1.0
========================================================================
MIT licenses
========================================================================
The following components are provided under the MIT license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
jul-to-slf4j 1.7.25: https://mvnrepository.com/artifact/org.slf4j/jul-to-slf4j/1.7.25, MIT
lombok 1.18.6: https://mvnrepository.com/artifact/org.projectlombok/lombok/1.18.6, MIT
mssql-jdbc 6.1.0.jre8: https://mvnrepository.com/artifact/com.microsoft.sqlserver/mssql-jdbc/6.1.0.jre8, MIT
slf4j-api 1.7.5: https://mvnrepository.com/artifact/org.slf4j/slf4j-api/1.7.5, MIT
========================================================================
MPL 1.1 licenses
========================================================================
The following components are provided under a MPL 1.1 license. See project link for details.
The text of each license is also included at licenses/LICENSE-[project].txt.
jamon-runtime 2.3.1: https://mvnrepository.com/artifact/org.jamon/jamon-runtime/2.3.1, MPL-1.1
========================================================================
Public Domain licenses
========================================================================
xz 1.0: https://mvnrepository.com/artifact/org.tukaani/xz/1.0, Public Domain
aopalliance 1.0: https://mvnrepository.com/artifact/aopalliance/aopalliance/1.0, Public Domain
========================================================================
UI related licenses
========================================================================
The following components are used in the UI. See project link for details.
The text of each license is also included at licenses/ui-licenses/LICENSE-[project].txt.
========================================
MIT licenses
========================================
ans-UI 1.1.7: https://github.com/analysys/ans-ui MIT
axios 0.16.2: https://github.com/axios/axios MIT
bootstrap 3.3.7: https://github.com/twbs/bootstrap MIT
canvg 1.5.1: https://github.com/canvg/canvg MIT
clipboard 2.0.1: https://github.com/zenorocha/clipboard.js MIT
codemirror 5.43.0: https://github.com/codemirror/CodeMirror MIT
dayjs 1.7.8: https://github.com/iamkun/dayjs MIT
html2canvas 0.5.0-beta4: https://github.com/niklasvh/html2canvas MIT
jquery 3.3.1: https://github.com/jquery/jquery MIT
jquery-ui 1.12.1: https://github.com/jquery/jquery-ui MIT
js-cookie 2.2.1: https://github.com/js-cookie/js-cookie MIT
jsplumb 2.8.6: https://github.com/jsplumb/jsplumb MIT and GPLv2
lodash 4.17.11: https://github.com/lodash/lodash MIT
normalize.css 8.0.1: https://github.com/necolas/normalize.css MIT
vue-treeselect 0.4.0: https://github.com/riophae/vue-treeselect MIT
vue 2.5.17: https://github.com/vuejs/vue MIT
vue-router 2.7.0: https://github.com/vuejs/vue-router MIT
vuex 3.0.0: https://github.com/vuejs/vuex MIT
vuex-router-sync 4.1.2: https://github.com/vuejs/vuex-router-sync MIT
========================================
Apache 2.0 licenses
========================================
echarts 4.1.0: https://github.com/apache/incubator-echarts Apache-2.0
========================================
BSD licenses
========================================
d3 3.5.17: https://github.com/d3/d3 BSD-3-Clause
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify dependency licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | dolphinscheduler-dist/release-docs/licenses/LICENSE-lombok.txt | Copyright (C) 2009-2015 The Project Lombok Authors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify dependency licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | dolphinscheduler-ui/package.json | {
"name": "dolphinscheduler-ui-frontend",
"version": "1.0.0",
"description": "A vue.js project",
"author": "DolphinScheduler",
"scripts": {
"build": "npm run clean && cross-env NODE_ENV=production webpack --config ./build/webpack.config.prod.js",
"dev": "cross-env NODE_ENV=development webpack-dev-server --config ./build/webpack.config.dev.js",
"clean": "rimraf dist",
"start": "npm run dev",
"build:release": "npm run clean && cross-env NODE_ENV=production PUBLIC_PATH=/dolphinscheduler/ui webpack --config ./build/webpack.config.release.js"
},
"dependencies": {
"@riophae/vue-treeselect": "^0.4.0",
"ans-ui": "1.1.9",
"axios": "^0.16.2",
"bootstrap": "3.3.7",
"canvg": "1.5.1",
"clipboard": "^2.0.1",
"codemirror": "^5.43.0",
"d3": "^3.5.17",
"dayjs": "^1.7.8",
"echarts": "4.1.0",
"html2canvas": "^0.5.0-beta4",
"jquery": "3.3.1",
"jquery-ui": "^1.12.1",
"js-cookie": "^2.2.1",
"jsplumb": "^2.8.6",
"lodash": "^4.17.11",
"normalize.css": "^8.0.1",
"vue": "^2.5.17",
"vue-router": "2.7.0",
"vuex": "^3.0.0",
"vuex-router-sync": "^5.0.0"
},
"devDependencies": {
"autoprefixer": "^9.1.0",
"babel-core": "^6.25.0",
"babel-helper-vue-jsx-merge-props": "^2.0.2",
"babel-loader": "^7.1.1",
"babel-plugin-syntax-dynamic-import": "^6.18.0",
"babel-plugin-syntax-jsx": "^6.18.0",
"babel-plugin-transform-class-properties": "^6.24.1",
"babel-plugin-transform-object-rest-spread": "^6.26.0",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-plugin-transform-vue-jsx": "^3.5.0",
"babel-preset-env": "^1.6.1",
"copy-webpack-plugin": "^4.5.2",
"cross-env": "^5.2.0",
"css-loader": "^0.28.8",
"cssnano": "4.1.10",
"env-parse": "^1.0.5",
"file-loader": "^5.0.2",
"globby": "^8.0.1",
"html-loader": "^0.5.5",
"html-webpack-plugin": "^3.2.0",
"mini-css-extract-plugin": "^0.8.2",
"node-sass": "^4.13.1",
"postcss-loader": "^3.0.0",
"progress-bar-webpack-plugin": "^1.12.1",
"rimraf": "^2.6.2",
"sass-loader": "^8.0.0",
"terser-webpack-plugin": "^2.1.3",
"url-loader": "^3.0.0",
"vue-loader": "^15.8.3",
"vue-style-loader": "^4.1.1",
"vue-template-compiler": "^2.5.16",
"webpack": "^4.41.3",
"webpack-cli": "^3.3.10",
"webpack-dev-server": "^3.9.0",
"webpack-merge": "^4.2.2"
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify dependency licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | pom.xml | <?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler</artifactId>
<version>1.2.1-SNAPSHOT</version>
<packaging>pom</packaging>
<name>${project.artifactId}</name>
<url>http://dolphinscheduler.apache.org</url>
<description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated
to solving the complex dependencies in data processing, making the scheduling system out of the box for data
processing.
</description>
<licenses>
<license>
<name>Apache License 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</connection>
<developerConnection>scm:git:https://github.com/apache/incubator-dolphinscheduler.git</developerConnection>
<url>https://github.com/apache/incubator-dolphinscheduler</url>
<tag>HEAD</tag>
</scm>
<mailingLists>
<mailingList>
<name>DolphinScheduler Developer List</name>
<post>[email protected]</post>
<subscribe>[email protected]</subscribe>
<unsubscribe>[email protected]</unsubscribe>
</mailingList>
</mailingLists>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>21</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
<curator.version>4.3.0</curator.version>
<spring.version>5.1.5.RELEASE</spring.version>
<spring.boot.version>2.1.3.RELEASE</spring.boot.version>
<java.version>1.8</java.version>
<logback.version>1.2.3</logback.version>
<hadoop.version>2.7.3</hadoop.version>
<quartz.version>2.2.3</quartz.version>
<jackson.version>2.9.8</jackson.version>
<mybatis-plus.version>3.2.0</mybatis-plus.version>
<mybatis.spring.version>2.0.1</mybatis.spring.version>
<cron.utils.version>5.0.5</cron.utils.version>
<fastjson.version>1.2.61</fastjson.version>
<druid.version>1.1.14</druid.version>
<h2.version>1.4.200</h2.version>
<commons.codec.version>1.6</commons.codec.version>
<commons.logging.version>1.1.1</commons.logging.version>
<httpclient.version>4.4.1</httpclient.version>
<httpcore.version>4.4.1</httpcore.version>
<junit.version>4.12</junit.version>
<mysql.connector.version>5.1.34</mysql.connector.version>
<slf4j.api.version>1.7.5</slf4j.api.version>
<slf4j.log4j12.version>1.7.5</slf4j.log4j12.version>
<commons.collections.version>3.2.2</commons.collections.version>
<commons.httpclient>3.0.1</commons.httpclient>
<commons.beanutils.version>1.7.0</commons.beanutils.version>
<commons.configuration.version>1.10</commons.configuration.version>
<commons.email.version>1.5</commons.email.version>
<poi.version>3.17</poi.version>
<javax.servlet.api.version>3.1.0</javax.servlet.api.version>
<commons.collections4.version>4.1</commons.collections4.version>
<guava.version>20.0</guava.version>
<postgresql.version>42.1.4</postgresql.version>
<hive.jdbc.version>2.1.0</hive.jdbc.version>
<commons.io.version>2.4</commons.io.version>
<oshi.core.version>3.5.0</oshi.core.version>
<clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version>
<mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version>
<jsp-2.1.version>6.1.14</jsp-2.1.version>
<spotbugs.version>3.1.12</spotbugs.version>
<checkstyle.version>3.0.0</checkstyle.version>
<apache.rat.version>0.13</apache.rat.version>
<zookeeper.version>3.4.14</zookeeper.version>
<frontend-maven-plugin.version>1.6</frontend-maven-plugin.version>
<maven-compiler-plugin.version>3.3</maven-compiler-plugin.version>
<maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version>
<maven-release-plugin.version>2.5.3</maven-release-plugin.version>
<maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version>
<maven-source-plugin.version>2.4</maven-source-plugin.version>
<maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version>
<maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version>
<rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version>
<jacoco.version>0.8.4</jacoco.version>
<jcip.version>1.0</jcip.version>
<maven.deploy.skip>false</maven.deploy.skip>
<cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version>
<mockito.version>2.21.0</mockito.version>
<powermock.version>2.0.2</powermock.version>
<servlet-api.version>2.5</servlet-api.version>
<swagger.version>1.9.3</swagger.version>
<springfox.version>2.9.2</springfox.version>
</properties>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<!-- quartz-->
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>org.quartz-scheduler</groupId>
<artifactId>quartz-jobs</artifactId>
<version>${quartz.version}</version>
</dependency>
<dependency>
<groupId>com.cronutils</groupId>
<artifactId>cron-utils</artifactId>
<version>${cron.utils.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>fastjson</artifactId>
<version>${fastjson.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>${spring.boot.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-core</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-context</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-beans</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-tx</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-jdbc</artifactId>
<version>${spring.version}</version>
</dependency>
<dependency>
<groupId>org.springframework</groupId>
<artifactId>spring-test</artifactId>
<version>${spring.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-dao</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-remote</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-service</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.dolphinscheduler</groupId>
<artifactId>dolphinscheduler-alert</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-framework</artifactId>
<version>${curator.version}</version>
</dependency>
<dependency>
<groupId>org.apache.curator</groupId>
<artifactId>curator-recipes</artifactId>
<version>${curator.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<artifactId>netty</artifactId>
<groupId>io.netty</groupId>
</exclusion>
<exclusion>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-annotations</artifactId>
</exclusion>
</exclusions>
<version>${zookeeper.version}</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons.codec.version}</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons.logging.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
<version>${httpcore.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
<version>${jackson.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>${junit.version}</version>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<version>${mockito.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-mockito2</artifactId>
<version>${powermock.version}</version>
<type>jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
<version>${mysql.connector.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.h2database</groupId>
<artifactId>h2</artifactId>
<version>${h2.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j.api.version}</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j.log4j12.version}</version>
</dependency>
<dependency>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
<version>${commons.collections.version}</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>${commons.httpclient}</version>
</dependency>
<dependency>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<version>${commons.beanutils.version}</version>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<version>${commons.configuration.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>${logback.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-email</artifactId>
<version>${commons.email.version}</version>
</dependency>
<!--excel poi-->
<dependency>
<groupId>org.apache.poi</groupId>
<artifactId>poi</artifactId>
<version>${poi.version}</version>
</dependency>
<!-- hadoop -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
<exclusion>
<artifactId>com.sun.jersey</artifactId>
<groupId>jersey-json</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>${commons.collections4.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>${guava.version}</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>${postgresql.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>${hive.jdbc.version}</version>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>${commons.io.version}</version>
</dependency>
<dependency>
<groupId>com.github.oshi</groupId>
<artifactId>oshi-core</artifactId>
<version>${oshi.core.version}</version>
</dependency>
<dependency>
<groupId>ru.yandex.clickhouse</groupId>
<artifactId>clickhouse-jdbc</artifactId>
<version>${clickhouse.jdbc.version}</version>
</dependency>
<dependency>
<groupId>com.microsoft.sqlserver</groupId>
<artifactId>mssql-jdbc</artifactId>
<version>${mssql.jdbc.version}</version>
</dependency>
<dependency>
<groupId>net.jcip</groupId>
<artifactId>jcip-annotations</artifactId>
<version>${jcip.version}</version>
<optional>true</optional>
</dependency>
<!-- for api module -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jsp-2.1.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
</dependency>
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<version>${javax.servlet.api.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger2</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>io.springfox</groupId>
<artifactId>springfox-swagger-ui</artifactId>
<version>${springfox.version}</version>
</dependency>
<dependency>
<groupId>com.github.xiaoymin</groupId>
<artifactId>swagger-bootstrap-ui</artifactId>
<version>${swagger.version}</version>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<finalName>apache-dolphinscheduler-incubating-${project.version}</finalName>
<pluginManagement>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>rpm-maven-plugin</artifactId>
<version>${rpm-maven-plugion.version}</version>
<inherited>false</inherited>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<testSource>${java.version}</testSource>
<testTarget>${java.version}</testTarget>
</configuration>
<version>${maven-compiler-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<tagNameFormat>@{project.version}</tagNameFormat>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven-assembly-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<version>${maven-source-plugin.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>${maven-dependency-plugin.version}</version>
</plugin>
</plugins>
</pluginManagement>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<executions>
<execution>
<id>attach-sources</id>
<phase>verify</phase>
<goals>
<goal>jar-no-fork</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<version>${maven-javadoc-plugin.version}</version>
<executions>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
<configuration>
<aggregate>true</aggregate>
<charset>${project.build.sourceEncoding}</charset>
<encoding>${project.build.sourceEncoding}</encoding>
<docencoding>${project.build.sourceEncoding}</docencoding>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-release-plugin</artifactId>
<version>${maven-release-plugin.version}</version>
<configuration>
<autoVersionSubmodules>true</autoVersionSubmodules>
<tagNameFormat>@{project.version}</tagNameFormat>
<tagBase>${project.version}</tagBase>
<!--<goals>-f pom.xml deploy</goals>-->
</configuration>
<dependencies>
<dependency>
<groupId>org.apache.maven.scm</groupId>
<artifactId>maven-scm-provider-jgit</artifactId>
<version>1.9.5</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<source>${java.version}</source>
<target>${java.version}</target>
<encoding>${project.build.sourceEncoding}</encoding>
<skip>false</skip><!--not skip compile test classes-->
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>${maven-surefire-plugin.version}</version>
<configuration>
<includes>
<include>**/alert/template/AlertTemplateFactoryTest.java</include>
<include>**/alert/template/impl/DefaultHTMLTemplateTest.java</include>
<include>**/alert/utils/EnterpriseWeChatUtilsTest.java</include>
<include>**/alert/utils/ExcelUtilsTest.java</include>
<include>**/alert/utils/FuncUtilsTest.java</include>
<include>**/alert/utils/JSONUtilsTest.java</include>
<include>**/alert/utils/MailUtilsTest.java</include>
<include>**/alert/plugin/EmailAlertPluginTest.java</include>
<include>**/api/dto/resources/filter/ResourceFilterTest.java</include>
<include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include>
<include>**/api/enums/testGetEnum.java</include>
<include>**/api/enums/StatusTest.java</include>
<include>**/api/exceptions/ApiExceptionHandlerTest.java</include>
<include>**/api/exceptions/ServiceExceptionTest.java</include>
<include>**/api/interceptor/LoginHandlerInterceptorTest.java</include>
<include>**/api/security/PasswordAuthenticatorTest.java</include>
<include>**/api/security/SecurityConfigTest.java</include>
<include>**/api/service/AccessTokenServiceTest.java</include>
<include>**/api/service/AlertGroupServiceTest.java</include>
<include>**/api/service/BaseDAGServiceTest.java</include>
<include>**/api/service/BaseServiceTest.java</include>
<include>**/api/service/DataAnalysisServiceTest.java</include>
<include>**/api/service/DataSourceServiceTest.java</include>
<include>**/api/service/ExecutorService2Test.java</include>
<include>**/api/service/ExecutorServiceTest.java</include>
<include>**/api/service/LoggerServiceTest.java</include>
<include>**/api/service/MonitorServiceTest.java</include>
<include>**/api/service/ProcessDefinitionServiceTest.java</include>
<include>**/api/service/ProcessInstanceServiceTest.java</include>
<include>**/api/service/ProjectServiceTest.java</include>
<include>**/api/service/QueueServiceTest.java</include>
<include>**/api/service/ResourcesServiceTest.java</include>
<include>**/api/service/SchedulerServiceTest.java</include>
<include>**/api/service/SessionServiceTest.java</include>
<include>**/api/service/TaskInstanceServiceTest.java</include>
<include>**/api/service/TenantServiceTest.java</include>
<include>**/api/service/UdfFuncServiceTest.java</include>
<include>**/api/service/UserAlertGroupServiceTest.java</include>
<include>**/api/service/UsersServiceTest.java</include>
<include>**/api/service/WorkerGroupServiceTest.java</include>
<include>**/api/controller/ProcessDefinitionControllerTest.java</include>
<include>**/api/utils/exportprocess/DataSourceParamTest.java</include>
<include>**/api/utils/exportprocess/DependentParamTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/FileUtilsTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/CheckUtilsTest.java</include>
<include>**/api/utils/ResultTest.java</include>
<include>**/common/graph/DAGTest.java</include>
<include>**/common/os/OshiTest.java</include>
<include>**/common/os/OSUtilsTest.java</include>
<include>**/common/shell/ShellExecutorTest.java</include>
<include>**/common/task/EntityTestUtils.java</include>
<include>**/common/task/FlinkParametersTest.java</include>
<include>**/common/task/SqoopParameterEntityTest.java</include>
<include>**/common/threadutils/ThreadPoolExecutorsTest.java</include>
<include>**/common/threadutils/ThreadUtilsTest.java</include>
<include>**/common/utils/process/ProcessBuilderForWin32Test.java</include>
<include>**/common/utils/process/ProcessEnvironmentForWin32Test.java</include>
<include>**/common/utils/process/ProcessImplForWin32Test.java</include>
<include>**/common/utils/CollectionUtilsTest.java</include>
<include>**/common/utils/CommonUtilsTest.java</include>
<include>**/common/utils/DateUtilsTest.java</include>
<include>**/common/utils/DependentUtilsTest.java</include>
<include>**/common/utils/EncryptionUtilsTest.java</include>
<include>**/common/utils/FileUtilsTest.java</include>
<include>**/common/utils/IpUtilsTest.java</include>
<include>**/common/utils/JSONUtilsTest.java</include>
<include>**/common/utils/LoggerUtilsTest.java</include>
<include>**/common/utils/OSUtilsTest.java</include>
<include>**/common/utils/ParameterUtilsTest.java</include>
<include>**/common/utils/PreconditionsTest.java</include>
<include>**/common/utils/PropertyUtilsTest.java</include>
<include>**/common/utils/SchemaUtilsTest.java</include>
<include>**/common/utils/ScriptRunnerTest.java</include>
<include>**/common/utils/SensitiveLogUtilsTest.java</include>
<include>**/common/utils/StringTest.java</include>
<include>**/common/utils/StringUtilsTest.java</include>
<include>**/common/utils/TaskParametersUtilsTest.java</include>
<include>**/common/utils/HadoopUtilsTest.java</include>
<include>**/common/utils/HttpUtilsTest.java</include>
<include>**/common/ConstantsTest.java</include>
<include>**/common/utils/HadoopUtils.java</include>
<include>**/common/plugin/FilePluginManagerTest</include>
<include>**/common/plugin/PluginClassLoaderTest</include>
<include>**/dao/mapper/AccessTokenMapperTest.java</include>
<include>**/dao/mapper/AlertGroupMapperTest.java</include>
<include>**/dao/mapper/CommandMapperTest.java</include>
<include>**/dao/mapper/ConnectionFactoryTest.java</include>
<include>**/dao/mapper/DataSourceMapperTest.java</include>
<include>**/remote/FastJsonSerializerTest.java</include>
<include>**/remote/NettyRemotingClientTest.java</include>
<include>**/remote/ResponseFutureTest.java</include>
<include>**/server/log/MasterLogFilterTest.java</include>
<include>**/server/log/SensitiveDataConverterTest.java</include>
<include>**/server/log/TaskLogDiscriminatorTest.java</include>
<include>**/server/log/TaskLogFilterTest.java</include>
<include>**/server/log/WorkerLogFilterTest.java</include>
<include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include>
<include>**/server/master/runner/MasterTaskExecThreadTest.java</include>
<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>
<include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include>
<include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include>
<include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include>
<include>**/server/master/register/MasterRegistryTest.java</include>
<include>**/server/master/AlertManagerTest.java</include>
<include>**/server/master/MasterCommandTest.java</include>
<include>**/server/master/DependentTaskTest.java</include>
<include>**/server/master/ConditionsTaskTest.java</include>
<include>**/server/master/MasterExecThreadTest.java</include>
<include>**/server/master/ParamsTest.java</include>
<include>**/server/register/ZookeeperNodeManagerTest.java</include>
<include>**/server/utils/DataxUtilsTest.java</include>
<include>**/server/utils/ExecutionContextTestUtils.java</include>
<include>**/server/utils/FlinkArgsUtilsTest.java</include>
<include>**/server/utils/ParamUtilsTest.java</include>
<include>**/server/utils/ProcessUtilsTest.java</include>
<include>**/server/utils/SparkArgsUtilsTest.java</include>
<include>**/server/worker/processor/TaskCallbackServiceTest.java</include>
<include>**/server/worker/registry/WorkerRegistryTest.java</include>
<include>**/server/worker/shell/ShellCommandExecutorTest.java</include>
<include>**/server/worker/sql/SqlExecutorTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/task/EnvFileTest.java</include>
<include>**/server/worker/task/spark/SparkTaskTest.java</include>
<include>**/server/worker/EnvFileTest.java</include>
<include>**/service/quartz/cron/CronUtilsTest.java</include>
<include>**/service/zk/DefaultEnsembleProviderTest.java</include>
<include>**/service/zk/ZKServerTest.java</include>
<include>**/service/queue/TaskUpdateQueueTest.java</include>
<include>**/dao/mapper/DataSourceUserMapperTest.java</include>
<include>**/dao/mapper/ErrorCommandMapperTest.java</include>
<include>**/dao/mapper/ProcessDefinitionMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include>
<include>**/dao/mapper/ProcessInstanceMapperTest.java</include>
<include>**/dao/mapper/ProjectMapperTest.java</include>
<include>**/dao/mapper/ProjectUserMapperTest.java</include>
<include>**/dao/mapper/QueueMapperTest.java</include>
<!--<include>**/dao/mapper/ResourceMapperTest.java</include>-->
<include>**/dao/mapper/ResourceUserMapperTest.java</include>
<include>**/dao/mapper/ScheduleMapperTest.java</include>
<include>**/dao/mapper/SessionMapperTest.java</include>
<include>**/dao/mapper/TaskInstanceMapperTest.java</include>
<include>**/dao/mapper/TenantMapperTest.java</include>
<include>**/dao/mapper/UdfFuncMapperTest.java</include>
<include>**/dao/mapper/UDFUserMapperTest.java</include>
<include>**/dao/mapper/UserAlertGroupMapperTest.java</include>
<include>**/dao/mapper/UserMapperTest.java</include>
<include>**/dao/utils/DagHelperTest.java</include>
<include>**/dao/AlertDaoTest.java</include>
<include>**/plugin/model/AlertDataTest.java</include>
<include>**/plugin/model/AlertInfoTest.java</include>
<include>**/plugin/utils/PropertyUtilsTest.java</include>
</includes>
<!-- <skip>true</skip> -->
</configuration>
</plugin>
<!-- jenkins plugin jacoco report-->
<plugin>
<groupId>org.jacoco</groupId>
<artifactId>jacoco-maven-plugin</artifactId>
<version>${jacoco.version}</version>
<configuration>
<destFile>target/jacoco.exec</destFile>
<dataFile>target/jacoco.exec</dataFile>
</configuration>
<executions>
<execution>
<id>jacoco-initialize</id>
<goals>
<goal>prepare-agent</goal>
</goals>
</execution>
<execution>
<id>jacoco-site</id>
<phase>test</phase>
<goals>
<goal>report</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<version>${apache.rat.version}</version>
<configuration>
<excludes>
<exclude>**/node_modules/**</exclude>
<exclude>**/node/**</exclude>
<exclude>**/dist/**</exclude>
<exclude>**/licenses/**</exclude>
<exclude>**/src/sass/common/_animation.scss</exclude>
<exclude>**/src/sass/common/_normalize.scss</exclude>
<exclude>.github/**</exclude>
<exclude>sql/soft_version</exclude>
<exclude>**/*.json</exclude>
<!-- document files -->
<exclude>**/*.md</exclude>
<exclude>**/*.MD</exclude>
<exclude>**/*.txt</exclude>
<exclude>**/docs/**</exclude>
<exclude>**/*.babelrc</exclude>
<exclude>**/*.eslintrc</exclude>
<exclude>**/.mvn/jvm.config</exclude>
<exclude>**/.mvn/wrapper/maven-wrapper.properties</exclude>
</excludes>
<consoleOutput>true</consoleOutput>
</configuration>
</plugin>
<plugin>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs-maven-plugin</artifactId>
<version>${spotbugs.version}</version>
<configuration>
<xmlOutput>true</xmlOutput>
<threshold>medium</threshold>
<effort>default</effort>
<excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile>
<failOnError>true</failOnError>
</configuration>
<dependencies>
<dependency>
<groupId>com.github.spotbugs</groupId>
<artifactId>spotbugs</artifactId>
<version>4.0.0-beta4</version>
</dependency>
</dependencies>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>${checkstyle.version}</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
<artifactId>checkstyle</artifactId>
<version>8.18</version>
</dependency>
</dependencies>
<configuration>
<consoleOutput>true</consoleOutput>
<encoding>UTF-8</encoding>
<configLocation>style/checkstyle.xml</configLocation>
<suppressionsLocation>style/checkstyle-suppressions.xml</suppressionsLocation>
<suppressionsFileExpression>checkstyle.suppressions.file</suppressionsFileExpression>
<failOnViolation>true</failOnViolation>
<violationSeverity>warning</violationSeverity>
<includeTestSourceDirectory>true</includeTestSourceDirectory>
<sourceDirectories>
<sourceDirectory>${project.build.sourceDirectory}</sourceDirectory>
</sourceDirectories>
<excludes>**\/generated-sources\/</excludes>
<skip>true</skip>
</configuration>
<executions>
<execution>
<phase>compile</phase>
<goals>
<goal>check</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>cobertura-maven-plugin</artifactId>
<version>${cobertura-maven-plugin.version}</version>
<configuration>
<check>
</check>
<aggregate>true</aggregate>
<outputDirectory>./target/cobertura</outputDirectory>
<encoding>${project.build.sourceEncoding}</encoding>
<quiet>true</quiet>
<format>xml</format>
<instrumentation>
<ignoreTrivial>true</ignoreTrivial>
</instrumentation>
</configuration>
</plugin>
</plugins>
</build>
<modules>
<module>dolphinscheduler-ui</module>
<module>dolphinscheduler-server</module>
<module>dolphinscheduler-common</module>
<module>dolphinscheduler-api</module>
<module>dolphinscheduler-dao</module>
<module>dolphinscheduler-alert</module>
<module>dolphinscheduler-dist</module>
<module>dolphinscheduler-remote</module>
<module>dolphinscheduler-service</module>
<module>dolphinscheduler-plugin-api</module>
</modules>
</project>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify dependency licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | tools/dependencies/check-LICENSE.sh | |
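For illustration only — this is not the repository's actual check-LICENSE.sh (the file above is still empty at this point), and the distribution path below is an assumption — a minimal version of such a check can diff the jars bundled in the binary package against a curated known-dependencies.txt:

```bash
#!/bin/bash
# Minimal sketch of a dependency license gate (illustrative only).
# Assumptions: the binary distribution has already been built and unpacked under
# DIST_DIR, and known-dependencies.txt lists one reviewed jar file name per line.
set -e

DIST_DIR="dolphinscheduler-dist/target/dolphinscheduler-bin"
KNOWN_LIST="tools/dependencies/known-dependencies.txt"

# Collect the file names of every jar shipped in the distribution.
find "${DIST_DIR}" -name "*.jar" -exec basename {} \; | sort -u > all-dependencies.txt

# Jars present in the distribution but absent from the known list fail the check.
UNKNOWN=$(comm -13 <(sort -u "${KNOWN_LIST}") all-dependencies.txt)

if [ -n "${UNKNOWN}" ]; then
    echo "Unknown dependencies found; review their licenses and add them to ${KNOWN_LIST}:"
    echo "${UNKNOWN}"
    exit 1
fi
echo "All bundled dependencies are covered by ${KNOWN_LIST}."
```

Run as a separate CI job after the package build, a script along these lines makes any newly introduced jar fail the build until its license has been reviewed and recorded.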
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 1,578 | [CI/CD]Checking the dependency licenses | Track the root issue #1460
Apache has strict requirements for licenses, so CI needs to verify dependency licenses; you can refer to SkyWalking or other Apache projects' dependency license check flows.
skywalking link: https://github.com/apache/skywalking/blob/master/tools/dependencies/check-LICENSE.sh
if you want to contribute, you may reply "i want to implement it" | https://github.com/apache/dolphinscheduler/issues/1578 | https://github.com/apache/dolphinscheduler/pull/2552 | 6e81dd3b582546c9048b3508198dddcfbc1d3282 | 6e08b29254f3a5c340172c34a6a67e3cc8af0c48 | "2019-12-25T13:53:23Z" | java | "2020-05-02T14:19:20Z" | tools/dependencies/known-dependencies.txt | |
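known-dependencies.txt is the curated side of that check. Its exact format here is an assumption, but a sorted, one-jar-name-per-line file keeps review diffs small and can be refreshed from a local build once any new licenses have been vetted; a hedged sketch:

```bash
# Illustrative only: example entries (one reviewed jar per line, kept sorted),
# matching versions declared in the pom, e.g.:
#   guava-20.0.jar
#   logback-classic-1.2.3.jar
#   zookeeper-3.4.14.jar
# Regenerate the list from a locally built distribution after reviewing new jars:
find dolphinscheduler-dist/target -name "*.jar" -exec basename {} \; | sort -u \
    > tools/dependencies/known-dependencies.txt
```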
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,578 | [BUG]wrong description of work flow copy tips | ![image](https://user-images.githubusercontent.com/29528966/80588031-28815900-8a4a-11ea-8c41-595890f0c529.png)
version:
dev
i think the tips should be "work flow copy" | https://github.com/apache/dolphinscheduler/issues/2578 | https://github.com/apache/dolphinscheduler/pull/2612 | 6a92bef5e36dd6c7100517cfa516ca9c8603d950 | fadfb28187f48648230094e05f946d2b41e8f67a | "2020-04-29T10:50:58Z" | java | "2020-05-06T13:33:08Z" | dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/definition/pages/list/_source/list.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<div class="list-model" style="position: relative;">
<div class="table-box">
<table class="fixed">
<tr>
<th scope="col" style="min-width: 50px">
<x-checkbox @on-change="_topCheckBoxClick" v-model="checkAll"></x-checkbox>
</th>
<th scope="col" style="min-width: 40px">
<span>{{$t('#')}}</span>
</th>
<th scope="col" style="min-width: 200px;max-width: 300px;">
<span>{{$t('Process Name')}}</span>
</th>
<th scope="col" style="min-width: 50px">
<span>{{$t('State')}}</span>
</th>
<th scope="col" style="min-width: 130px">
<span>{{$t('Create Time')}}</span>
</th>
<th scope="col" style="min-width: 130px">
<span>{{$t('Update Time')}}</span>
</th>
<th scope="col" style="min-width: 150px">
<span>{{$t('Description')}}</span>
</th>
<th scope="col" style="min-width: 70px">
<span>{{$t('Modify User')}}</span>
</th>
<th scope="col" style="min-width: 70px">
<div style="width: 80px">
<span>{{$t('Timing state')}}</span>
</div>
</th>
<th scope="col" style="min-width: 300px">
<span>{{$t('Operation')}}</span>
</th>
</tr>
<tr v-for="(item, $index) in list" :key="item.id">
<td width="50"><x-checkbox v-model="item.isCheck" :disabled="item.releaseState === 'ONLINE'" @on-change="_arrDelChange"></x-checkbox></td>
<td width="50">
<span>{{parseInt(pageNo === 1 ? ($index + 1) : (($index + 1) + (pageSize * (pageNo - 1))))}}</span>
</td>
<td style="min-width: 200px;max-width: 300px;padding-right: 10px;">
<span class="ellipsis">
<router-link :to="{ path: '/projects/definition/list/' + item.id}" tag="a" class="links" :title="item.name">
{{item.name}}
</router-link>
</span>
</td>
<td><span>{{_rtPublishStatus(item.releaseState)}}</span></td>
<td>
<span v-if="item.createTime">{{item.createTime | formatDate}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.updateTime">{{item.updateTime | formatDate}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.description" class="ellipsis" v-tooltip.large.top.start.light="{text: item.description, maxWidth: '500px'}">{{item.description}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.modifyBy">{{item.modifyBy}}</span>
<span v-else>-</span>
</td>
<td>
<span v-if="item.scheduleReleaseState === 'OFFLINE'">{{$t('offline')}}</span>
<span v-if="item.scheduleReleaseState === 'ONLINE'">{{$t('online')}}</span>
<span v-if="!item.scheduleReleaseState">-</span>
</td>
<td>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Edit')" @click="_edit(item)" :disabled="item.releaseState === 'ONLINE'" icon="ans-icon-edit"><!--{{$t('编辑')}}--></x-button>
<x-button type="success" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Start')" @click="_start(item)" :disabled="item.releaseState !== 'ONLINE'" icon="ans-icon-play"><!--{{$t('启动')}}--></x-button>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Timing')" @click="_timing(item)" :disabled="item.releaseState !== 'ONLINE' || item.scheduleReleaseState !== null" icon="ans-icon-timer"><!--{{$t('定时')}}--></x-button>
<x-button type="warning" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('online')" @click="_poponline(item)" v-if="item.releaseState === 'OFFLINE'" icon="ans-icon-upward"><!--{{$t('下线')}}--></x-button>
<x-button type="error" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('offline')" @click="_downline(item)" v-if="item.releaseState === 'ONLINE'" icon="ans-icon-downward"><!--{{$t('上线')}}--></x-button>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Copy')" @click="_copyProcess(item)" :disabled="item.releaseState === 'ONLINE'" icon="ans-icon-copy"><!--{{$t('复制')}}--></x-button>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Cron Manage')" @click="_timingManage(item)" :disabled="item.releaseState !== 'ONLINE'" icon="ans-icon-datetime"><!--{{$t('定时管理')}}--></x-button>
<x-poptip
:ref="'poptip-delete-' + $index"
placement="bottom-end"
width="90">
<p>{{$t('Delete?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeDelete($index)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_delete(item,$index)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button
icon="ans-icon-trash"
type="error"
shape="circle"
size="xsmall"
:disabled="item.releaseState === 'ONLINE'"
data-toggle="tooltip"
:title="$t('delete')">
</x-button>
</template>
</x-poptip>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('TreeView')" @click="_treeView(item)" icon="ans-icon-node"><!--{{$t('树形图')}}--></x-button>
<x-button type="info" shape="circle" size="xsmall" data-toggle="tooltip" :title="$t('Export')" @click="_export(item)" icon="ans-icon-download"><!--{{$t('导出')}}--></x-button>
</td>
</tr>
</table>
</div>
<x-poptip
v-show="strSelectIds !== ''"
ref="poptipDeleteAll"
placement="bottom-start"
width="90">
<p>{{$t('Delete?')}}</p>
<div style="text-align: right; margin: 0;padding-top: 4px;">
<x-button type="text" size="xsmall" shape="circle" @click="_closeDelete(-1)">{{$t('Cancel')}}</x-button>
<x-button type="primary" size="xsmall" shape="circle" @click="_delete({},-1)">{{$t('Confirm')}}</x-button>
</div>
<template slot="reference">
<x-button size="xsmall" style="position: absolute; bottom: -48px; left: 22px;" >{{$t('Delete')}}</x-button>
</template>
</x-poptip>
<template v-if="strSelectIds !== ''">
<x-button size="xsmall" style="position: absolute; bottom: -48px; left: 80px;" @click="_batchExport(item)" >{{$t('Export')}}</x-button>
</template>
</div>
</template>
<script>
import _ from 'lodash'
import mStart from './start'
import mTiming from './timing'
import { mapActions } from 'vuex'
import { publishStatus } from '@/conf/home/pages/dag/_source/config'
export default {
name: 'definition-list',
data () {
return {
list: [],
strSelectIds: '',
checkAll: false
}
},
props: {
processList: Array,
pageNo: Number,
pageSize: Number
},
methods: {
...mapActions('dag', ['editProcessState', 'getStartCheck', 'getReceiver', 'deleteDefinition', 'batchDeleteDefinition','exportDefinition','copyProcess']),
_rtPublishStatus (code) {
return _.filter(publishStatus, v => v.code === code)[0].desc
},
_treeView (item) {
this.$router.push({ path: `/projects/definition/tree/${item.id}` })
},
/**
* Start
*/
_start (item) {
this.getStartCheck({ processDefinitionId: item.id }).then(res => {
let self = this
let modal = this.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mStart, {
on: {
onUpdate () {
self._onUpdate()
modal.remove()
},
close () {
modal.remove()
}
},
props: {
item: item
}
})
}
})
}).catch(e => {
this.$message.error(e.msg || '')
})
},
/**
* get email receivers
*/
_getReceiver (id) {
return new Promise((resolve, reject) => {
this.getReceiver({ processDefinitionId: id }).then(res => {
resolve({
receivers: res.receivers && res.receivers.split(',') || [],
receiversCc: res.receiversCc && res.receiversCc.split(',') || []
})
})
})
},
/**
* timing
*/
_timing (item) {
let self = this
this._getReceiver(item.id).then(res => {
let modal = this.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mTiming, {
on: {
onUpdate () {
self._onUpdate()
modal.remove()
},
close () {
modal.remove()
}
},
props: {
item: item,
receiversD: res.receivers,
receiversCcD: res.receiversCc,
type: 'timing'
}
})
}
})
})
},
/**
* Timing manage
*/
_timingManage (item) {
this.$router.push({ path: `/projects/definition/list/timing/${item.id}` })
},
/**
* Close the delete layer
*/
_closeDelete (i) {
if (i > 0) {
this.$refs[`poptip-delete-${i}`][0].doClose()
}else{
this.$refs['poptipDeleteAll'].doClose()
}
},
/**
* delete
*/
_delete (item, i) {
// i < 0 signals a batch delete (two or more rows selected)
if (i < 0) {
this._batchDelete()
return
}
// remove one
this.deleteDefinition({
processDefinitionId: item.id
}).then(res => {
this.$refs[`poptip-delete-${i}`][0].doClose()
this._onUpdate()
this.$message.success(res.msg)
}).catch(e => {
this.$refs[`poptip-delete-${i}`][0].doClose()
this.$message.error(e.msg || '')
})
},
/**
* edit
*/
_edit (item) {
this.$router.push({ path: `/projects/definition/list/${item.id}` })
},
/**
* Offline
*/
_downline (item) {
this._upProcessState({
processId: item.id,
releaseState: 0
})
},
/**
* online
*/
_poponline (item) {
this._upProcessState({
processId: item.id,
releaseState: 1
})
},
/**
* copy
*/
_copyProcess (item) {
this.copyProcess({
processId: item.id
}).then(res => {
this.$message.success(res.msg)
$('body').find('.tooltip.fade.top.in').remove()
this._onUpdate()
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_export (item) {
this.exportDefinition({
processDefinitionIds: item.id,
fileName: item.name
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_batchExport () {
this.exportDefinition({
processDefinitionIds: this.strSelectIds,
fileName: "process_"+new Date().getTime()
}).then(res => {
this._onUpdate()
this.checkAll = false
this.strSelectIds = ''
}).catch(e => {
this.strSelectIds = ''
this.checkAll = false
this.$message.error(e.msg)
})
},
/**
* Edit state
*/
_upProcessState (o) {
this.editProcessState(o).then(res => {
this.$message.success(res.msg)
$('body').find('.tooltip.fade.top.in').remove()
this._onUpdate()
}).catch(e => {
this.$message.error(e.msg || '')
})
},
_onUpdate () {
this.$emit('on-update')
},
/**
* click the select-all checkbox
*/
_topCheckBoxClick (is) {
_.map(this.list , v => v.isCheck = v.releaseState === 'ONLINE' ? false : is)
this._arrDelChange()
},
/**
* the array to be deleted
*/
_arrDelChange (v) {
let arr = []
this.list.forEach((item)=>{
if (item.isCheck) {
arr.push(item.id)
}
})
this.strSelectIds = _.join(arr, ',')
if (v === false) {
this.checkAll = false
}
},
/**
* batch delete
*/
_batchDelete () {
this.$refs['poptipDeleteAll'].doClose()
this.batchDeleteDefinition({
processDefinitionIds: this.strSelectIds
}).then(res => {
this._onUpdate()
this.checkAll = false
this.$message.success(res.msg)
}).catch(e => {
this.checkAll = false
this.$message.error(e.msg || '')
})
}
},
watch: {
processList: {
handler (a) {
this.checkAll = false
this.list = []
setTimeout(() => {
this.list = _.cloneDeep(a)
})
},
immediate: true,
deep: true
},
pageNo () {
this.strSelectIds = ''
}
},
created () {
},
mounted () {
},
components: { }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,578 | [BUG]wrong description of work flow copy tips | ![image](https://user-images.githubusercontent.com/29528966/80588031-28815900-8a4a-11ea-8c41-595890f0c529.png)
version:
dev
i think the tips should be "work flow copy" | https://github.com/apache/dolphinscheduler/issues/2578 | https://github.com/apache/dolphinscheduler/pull/2612 | 6a92bef5e36dd6c7100517cfa516ca9c8603d950 | fadfb28187f48648230094e05f946d2b41e8f67a | "2020-04-29T10:50:58Z" | java | "2020-05-06T13:33:08Z" | dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': 'User Name',
'Please enter user name': 'Please enter user name',
'Password': 'Password',
'Please enter your password': 'Please enter your password',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22',
'Login': 'Login',
'Home': 'Home',
'Failed to create node to save': 'Failed to create node to save',
'Global parameters': 'Global parameters',
'Local parameters': 'Local parameters',
'Copy success': 'Copy success',
'The browser does not support automatic copying': 'The browser does not support automatic copying',
'Whether to save the DAG graph': 'Whether to save the DAG graph',
'Current node settings': 'Current node settings',
'View history': 'View history',
'View log': 'View log',
'Enter this child node': 'Enter this child node',
'Node name': 'Node name',
'Please enter name(required)': 'Please enter name(required)',
'Run flag': 'Run flag',
'Normal': 'Normal',
'Prohibition execution': 'Prohibition execution',
'Please enter description': 'Please enter description',
'Number of failed retries': 'Number of failed retries',
'Times': 'Times',
'Failed retry interval': 'Failed retry interval',
'Minute': 'Minute',
'Cancel': 'Cancel',
'Confirm add': 'Confirm add',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process',
'The task has not been executed and cannot enter the sub-Process': 'The task has not been executed and cannot enter the sub-Process',
'Please enter name (required)': 'Please enter name (required)',
'Name already exists': 'Name already exists',
'Download Log': 'Download Log',
'Refresh Log': 'Refresh Log',
'Enter full screen': 'Enter full screen',
'Cancel full screen': 'Cancel full screen',
'Close': 'Close',
'Update log success': 'Update log success',
'No more logs': 'No more logs',
'No log': 'No log',
'Loading Log...': 'Loading Log...',
'Set the DAG diagram name': 'Set the DAG diagram name',
'Please enter description(optional)': 'Please enter description(optional)',
'Set global': 'Set global',
'Whether to update the process definition': 'Whether to update the process definition',
'Add': 'Add',
'DAG graph name cannot be empty': 'DAG graph name cannot be empty',
'Create Datasource': 'Create Datasource',
'Project Home': 'Project Home',
'Project Manage': 'Project',
'Create Project': 'Create Project',
'Cron Manage': 'Cron Manage',
'Tenant Manage': 'Tenant Manage',
'Create Tenant': 'Create Tenant',
'User Manage': 'User Manage',
'Create User': 'Create User',
'User Information': 'User Information',
'Edit Password': 'Edit Password',
'success': 'success',
'failed': 'failed',
'delete': 'delete',
'Please choose': 'Please choose',
'Please enter a positive integer': 'Please enter a positive integer',
'Program Type': 'Program Type',
'Main class': 'Main class',
'Main jar package': 'Main jar package',
'Please enter main jar package': 'Please enter main jar package',
'Command-line parameters': 'Command-line parameters',
'Please enter Command-line parameters': 'Please enter Command-line parameters',
'Other parameters': 'Other parameters',
'Please enter other parameters': 'Please enter other parameters',
'Resources': 'Resources',
'Custom Parameters': 'Custom Parameters',
'Custom template': 'Custom template',
'Datasource': 'Datasource',
'methods': 'methods',
'Please enter method(optional)': 'Please enter method(optional)',
'Script': 'Script',
'Please enter script(required)': 'Please enter script(required)',
'Deploy Mode': 'Deploy Mode',
'Driver core number': 'Driver core number',
'Please enter driver core number': 'Please enter driver core number',
'Driver memory use': 'Driver memory use',
'Please enter driver memory use': 'Please enter driver memory use',
'Number of Executors': 'Number of Executors',
'Please enter the number of Executor': 'Please enter the number of Executor',
'Executor memory': 'Executor memory',
'Please enter the Executor memory': 'Please enter the Executor memory',
'Executor core number': 'Executor core number',
'Please enter Executor core number': 'Please enter Executor core number',
'The number of Executors should be a positive integer': 'The number of Executors should be a positive integer',
'Memory should be a positive integer': 'Memory should be a positive integer',
  'Please enter ExecutorPlease enter Executor core number': 'Please enter Executor core number',
'Core number should be positive integer': 'Core number should be positive integer',
'SQL Type': 'SQL Type',
'Title': 'Title',
'Please enter the title of email': 'Please enter the title of email',
'Table': 'Table',
'TableMode': 'Table',
'Attachment': 'Attachment',
'SQL Parameter': 'SQL Parameter',
'SQL Statement': 'SQL Statement',
'UDF Function': 'UDF Function',
'Please enter a SQL Statement(required)': 'Please enter a SQL Statement(required)',
'Please enter a JSON Statement(required)': 'Please enter a JSON Statement(required)',
'One form or attachment must be selected': 'One form or attachment must be selected',
'Recipient required': 'Recipient required',
'Mail subject required': 'Mail subject required',
'Child Node': 'Child Node',
'Please select a sub-Process': 'Please select a sub-Process',
'Edit': 'Edit',
'Datasource Name': 'Datasource Name',
'Please enter datasource name': 'Please enter datasource name',
'IP': 'IP',
'Please enter IP': 'Please enter IP',
'Port': 'Port',
'Please enter port': 'Please enter port',
'Database Name': 'Database Name',
'Please enter database name': 'Please enter database name',
'Oracle Connect Type': 'ServiceName or SID',
'Oracle Service Name': 'ServiceName',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc connect parameters',
'Test Connect': 'Test Connect',
'Please enter resource name': 'Please enter resource name',
'Please enter resource folder name': 'Please enter resource folder name',
'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement',
'Please enter IP/hostname': 'Please enter IP/hostname',
'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format',
'#': '#',
'Datasource Type': 'Datasource Type',
'Datasource Parameter': 'Datasource Parameter',
'Create Time': 'Create Time',
'Update Time': 'Update Time',
'Operation': 'Operation',
'Click to view': 'Click to view',
'Delete?': 'Delete?',
'Confirm': 'Confirm',
'Task status statistics': 'Task Status Statistics',
'Number': 'Number',
'State': 'State',
'Process Status Statistics': 'Process Status Statistics',
'Process Definition Statistics': 'Process Definition Statistics',
'Project Name': 'Project Name',
'Please enter name': 'Please enter name',
'Owned Users': 'Owned Users',
'Process Pid': 'Process Pid',
'Zk registration directory': 'Zk registration directory',
'cpuUsage': 'cpuUsage',
'memoryUsage': 'memoryUsage',
'Last heartbeat time': 'Last heartbeat time',
'Edit Tenant': 'Edit Tenant',
'Tenant Code': 'Tenant Code',
'Tenant Name': 'Tenant Name',
'Queue': 'Queue',
'Please select a queue': 'Please select a queue',
'Please enter the tenant code in English': 'Please enter the tenant code in English',
'Please enter tenant code in English': 'Please enter tenant code in English',
'Edit User': 'Edit User',
'Tenant': 'Tenant',
'Email': 'Email',
'Phone': 'Phone',
'Please enter phone number': 'Please enter phone number',
'Please enter main class': 'Please enter main class',
'Please enter email': 'Please enter email',
'Please enter the correct email format': 'Please enter the correct email format',
'Please enter the correct mobile phone format': 'Please enter the correct mobile phone format',
'Project': 'Project',
'Authorize': 'Authorize',
'File resources': 'File resources',
'UDF resources': 'UDF resources',
'Please select UDF resources directory': 'Please select UDF resources directory',
'UDF resources directory' : 'UDF resources directory',
'Upload File Size': 'Upload File size cannot exceed 1g',
'Edit alarm group': 'Edit alarm group',
'Create alarm group': 'Create alarm group',
'Group Name': 'Group Name',
'Please enter group name': 'Please enter group name',
'Group Type': 'Group Type',
'Remarks': 'Remarks',
'SMS': 'SMS',
'Managing Users': 'Managing Users',
'Permission': 'Permission',
'Administrator': 'Administrator',
'Confirm Password': 'Confirm Password',
'Please enter confirm password': 'Please enter confirm password',
'Password cannot be in Chinese': 'Password cannot be in Chinese',
'Please enter a password (6-22) character password': 'Please enter a password (6-22) character password',
'Confirmation password cannot be in Chinese': 'Confirmation password cannot be in Chinese',
'Please enter a confirmation password (6-22) character password': 'Please enter a confirmation password (6-22) character password',
'The password is inconsistent with the confirmation password': 'The password is inconsistent with the confirmation password',
'Please select the datasource': 'Please select the datasource',
'Please select resources': 'Please select resources',
'Query': 'Query',
'Non Query': 'Non Query',
'prop(required)': 'prop(required)',
'value(optional)': 'value(optional)',
'value(required)': 'value(required)',
'prop is empty': 'prop is empty',
'value is empty': 'value is empty',
'prop is repeat': 'prop is repeat',
'Start Time': 'Start Time',
'End Time': 'End Time',
'crontab': 'crontab',
'Failure Strategy': 'Failure Strategy',
'online': 'online',
'offline': 'offline',
'Task Status': 'Task Status',
'Process Instance': 'Process Instance',
'Task Instance': 'Task Instance',
'Select date range': 'Select date range',
'Date': 'Date',
'waiting': 'waiting',
'execution': 'execution',
'finish': 'finish',
'Create File': 'Create File',
'Create folder': 'Create folder',
'File Name': 'File Name',
'Folder Name': 'Folder Name',
'File Format': 'File Format',
'Folder Format': 'Folder Format',
'File Content': 'File Content',
'Create': 'Create',
'Please enter the resource content': 'Please enter the resource content',
'Resource content cannot exceed 3000 lines': 'Resource content cannot exceed 3000 lines',
'File Details': 'File Details',
'Download Details': 'Download Details',
'Return': 'Return',
'Save': 'Save',
'File Manage': 'File Manage',
'Upload Files': 'Upload Files',
'Create UDF Function': 'Create UDF Function',
'Upload UDF Resources': 'Upload UDF Resources',
'Service-Master': 'Service-Master',
'Service-Worker': 'Service-Worker',
'Process Name': 'Process Name',
'Executor': 'Executor',
'Run Type': 'Run Type',
'Scheduling Time': 'Scheduling Time',
'Run Times': 'Run Times',
'host': 'host',
'fault-tolerant sign': 'fault-tolerant sign',
'Rerun': 'Rerun',
'Recovery Failed': 'Recovery Failed',
'Stop': 'Stop',
'Pause': 'Pause',
'Recovery Suspend': 'Recovery Suspend',
'Gantt': 'Gantt',
'Name': 'Name',
'Node Type': 'Node Type',
'Submit Time': 'Submit Time',
'Duration': 'Duration',
'Retry Count': 'Retry Count',
'Task Name': 'Task Name',
'Task Date': 'Task Date',
'Source Table': 'Source Table',
'Record Number': 'Record Number',
'Target Table': 'Target Table',
'Online viewing type is not supported': 'Online viewing type is not supported',
'Size': 'Size',
'Rename': 'Rename',
'Download': 'Download',
'Export': 'Export',
'Submit': 'Submit',
'Edit UDF Function': 'Edit UDF Function',
'type': 'type',
'UDF Function Name': 'UDF Function Name',
'FILE': 'FILE',
'UDF': 'UDF',
'File Subdirectory': 'File Subdirectory',
'Please enter a function name': 'Please enter a function name',
'Package Name': 'Package Name',
'Please enter a Package name': 'Please enter a Package name',
'Parameter': 'Parameter',
'Please enter a parameter': 'Please enter a parameter',
'UDF Resources': 'UDF Resources',
'Upload Resources': 'Upload Resources',
'Instructions': 'Instructions',
  'Please enter a instructions': 'Please enter the instructions',
'Please enter a UDF function name': 'Please enter a UDF function name',
'Select UDF Resources': 'Select UDF Resources',
'Class Name': 'Class Name',
'Jar Package': 'Jar Package',
'Library Name': 'Library Name',
'UDF Resource Name': 'UDF Resource Name',
'File Size': 'File Size',
'Description': 'Description',
'Drag Nodes and Selected Items': 'Drag Nodes and Selected Items',
'Select Line Connection': 'Select Line Connection',
'Delete selected lines or nodes': 'Delete selected lines or nodes',
'Full Screen': 'Full Screen',
'Unpublished': 'Unpublished',
'Start Process': 'Start Process',
'Execute from the current node': 'Execute from the current node',
'Recover tolerance fault process': 'Recover tolerance fault process',
'Resume the suspension process': 'Resume the suspension process',
'Execute from the failed nodes': 'Execute from the failed nodes',
'Complement Data': 'Complement Data',
'slot':'slot',
'taskManager':'taskManager',
'jobManagerMemory':'jobManagerMemory',
'taskManagerMemory':'taskManagerMemory',
'Scheduling execution': 'Scheduling execution',
'Recovery waiting thread': 'Recovery waiting thread',
'Submitted successfully': 'Submitted successfully',
'Executing': 'Executing',
'Ready to pause': 'Ready to pause',
'Ready to stop': 'Ready to stop',
'Need fault tolerance': 'Need fault tolerance',
'kill': 'kill',
'Waiting for thread': 'Waiting for thread',
'Waiting for dependence': 'Waiting for dependence',
'Start': 'Start',
'Copy': 'Copy',
'Copy name': 'Copy name',
'Delete': 'Delete',
'Please enter keyword': 'Please enter keyword',
'File Upload': 'File Upload',
'Drag the file into the current upload window': 'Drag the file into the current upload window',
'Drag area upload': 'Drag area upload',
'Upload': 'Upload',
'Please enter file name': 'Please enter file name',
'Please select the file to upload': 'Please select the file to upload',
'Resources manage': 'Resources',
'Security': 'Security',
'Logout': 'Logout',
'No data': 'No data',
'Uploading...': 'Uploading...',
'Loading...': 'Loading...',
'List': 'List',
'Unable to download without proper url': 'Unable to download without proper url',
'Process': 'Process',
'Process definition': 'Process definition',
'Task record': 'Task record',
'Warning group manage': 'Warning group manage',
'Servers manage': 'Servers manage',
'UDF manage': 'UDF manage',
'Resource manage': 'Resource manage',
'Function manage': 'Function manage',
'Edit password': 'Edit password',
'Ordinary users': 'Ordinary users',
'Create process': 'Create process',
'Import process': 'Import process',
'Timing state': 'Timing state',
'Timing': 'Timing',
'TreeView': 'TreeView',
'Mailbox already exists! Recipients and copyers cannot repeat': 'Mailbox already exists! Recipients and copyers cannot repeat',
'Mailbox input is illegal': 'Mailbox input is illegal',
'Please set the parameters before starting': 'Please set the parameters before starting',
'Continue': 'Continue',
'End': 'End',
'Node execution': 'Node execution',
'Backward execution': 'Backward execution',
'Forward execution': 'Forward execution',
'Execute only the current node': 'Execute only the current node',
'Notification strategy': 'Notification strategy',
'Notification group': 'Notification group',
'Please select a notification group': 'Please select a notification group',
'Recipient': 'Recipient',
'Cc': 'Cc',
'Whether it is a complement process?': 'Whether it is a complement process?',
'Schedule date': 'Schedule date',
'Mode of execution': 'Mode of execution',
'Serial execution': 'Serial execution',
'Parallel execution': 'Parallel execution',
'Set parameters before timing': 'Set parameters before timing',
'Start and stop time': 'Start and stop time',
'Please select time': 'Please select time',
'Please enter crontab': 'Please enter crontab',
'none_1': 'none',
'success_1': 'success',
'failure_1': 'failure',
'All_1': 'All',
'Toolbar': 'Toolbar',
'View variables': 'View variables',
'Format DAG': 'Format DAG',
'Refresh DAG status': 'Refresh DAG status',
'Return_1': 'Return',
'Please enter format': 'Please enter format',
'connection parameter': 'connection parameter',
'Process definition details': 'Process definition details',
'Create process definition': 'Create process definition',
'Scheduled task list': 'Scheduled task list',
'Process instance details': 'Process instance details',
'Create Resource': 'Create Resource',
'User Center': 'User Center',
'Please enter method': 'Please enter method',
'none': 'none',
'name': 'name',
'Process priority': 'Process priority',
'Task priority': 'Task priority',
'Task timeout alarm': 'Task timeout alarm',
'Timeout strategy': 'Timeout strategy',
'Timeout alarm': 'Timeout alarm',
'Timeout failure': 'Timeout failure',
'Timeout period': 'Timeout period',
'Timeout strategy must be selected': 'Timeout strategy must be selected',
'Timeout must be a positive integer': 'Timeout must be a positive integer',
'Add dependency': 'Add dependency',
'and': 'and',
'or': 'or',
'month': 'month',
'week': 'week',
'day': 'day',
'hour': 'hour',
'Running': 'Running',
'Waiting for dependency to complete': 'Waiting for dependency to complete',
'Selected': 'Selected',
'Last1Hour': 'Last1Hour',
'Last2Hours': 'Last2Hours',
'Last3Hours': 'Last3Hours',
'today': 'today',
'Last1Days': 'Last1Days',
'Last2Days': 'Last2Days',
'Last3Days': 'Last3Days',
'Last7Days': 'Last7Days',
'ThisWeek': 'ThisWeek',
'LastWeek': 'LastWeek',
'LastMonday': 'LastMonday',
'LastTuesday': 'LastTuesday',
'LastWednesday': 'LastWednesday',
'LastThursday': 'LastThursday',
'LastFriday': 'LastFriday',
'LastSaturday': 'LastSaturday',
'LastSunday': 'LastSunday',
'ThisMonth': 'ThisMonth',
'LastMonth': 'LastMonth',
'LastMonthBegin': 'LastMonthBegin',
'LastMonthEnd': 'LastMonthEnd',
'Refresh status succeeded': 'Refresh status succeeded',
'Queue manage': 'Queue manage',
'Create queue': 'Create queue',
'Edit queue': 'Edit queue',
'Datasource manage': 'Datasource',
'History task record': 'History task record',
'Please go online': 'Please go online',
'Queue value': 'Queue value',
'Please enter queue value': 'Please enter queue value',
'Worker group manage': 'Worker group manage',
'Create worker group': 'Create worker group',
'Edit worker group': 'Edit worker group',
'Token manage': 'Token manage',
'Create token': 'Create token',
'Edit token': 'Edit token',
'Please enter the IP address separated by commas': 'Please enter the IP address separated by commas',
'Note: Multiple IP addresses have been comma separated': 'Note: Multiple IP addresses have been comma separated',
'Failure time': 'Failure time',
'User': 'User',
'Please enter token': 'Please enter token',
'Generate token': 'Generate token',
'Monitor': 'Monitor',
'Group': 'Group',
'Queue statistics': 'Queue statistics',
'Command status statistics': 'Command status statistics',
'Task kill': 'Task Kill',
'Task queue': 'Task queue',
'Error command count': 'Error command count',
'Normal command count': 'Normal command count',
'Manage': ' Manage',
'Number of connections': 'Number of connections',
'Sent': 'Sent',
'Received': 'Received',
'Min latency': 'Min latency',
'Avg latency': 'Avg latency',
'Max latency': 'Max latency',
'Node count': 'Node count',
'Query time': 'Query time',
'Node self-test status': 'Node self-test status',
'Health status': 'Health status',
'Max connections': 'Max connections',
'Threads connections': 'Threads connections',
'Max used connections': 'Max used connections',
'Threads running connections': 'Threads running connections',
'Worker group': 'Worker group',
'Please enter a positive integer greater than 0': 'Please enter a positive integer greater than 0',
'Pre Statement': 'Pre Statement',
'Post Statement': 'Post Statement',
'Statement cannot be empty': 'Statement cannot be empty',
'Process Define Count': 'Work flow Define Count',
'Process Instance Running Count': 'Process Instance Running Count',
'command number of waiting for running': 'command number of waiting for running',
'failure command number': 'failure command number',
'tasks number of waiting running': 'tasks number of waiting running',
'task number of ready to kill': 'task number of ready to kill',
'Statistics manage': 'Statistics Manage',
'statistics': 'Statistics',
'select tenant':'select tenant',
'Please enter Principal':'Please enter Principal',
'The start time must not be the same as the end': 'The start time must not be the same as the end',
'Startup parameter': 'Startup parameter',
'Startup type': 'Startup type',
'warning of timeout': 'warning of timeout',
'Next five execution times': 'Next five execution times',
'Execute time': 'Execute time',
'Complement range': 'Complement range',
'Http Url':'Http Url',
'Http Method':'Http Method',
'Http Parameters':'Http Parameters',
'Http Parameters Key':'Http Parameters Key',
'Http Parameters Position':'Http Parameters Position',
'Http Parameters Value':'Http Parameters Value',
'Http Check Condition':'Http Check Condition',
'Http Condition':'Http Condition',
'Please Enter Http Url': 'Please Enter Http Url(required)',
'Please Enter Http Condition': 'Please Enter Http Condition',
'There is no data for this period of time': 'There is no data for this period of time',
'IP address cannot be empty': 'IP address cannot be empty',
'Please enter the correct IP': 'Please enter the correct IP',
'Please generate token': 'Please generate token',
'Spark Version': 'Spark Version',
'TargetDataBase': 'target database',
'TargetTable': 'target table',
'Please enter the table of target': 'Please enter the table of target',
'Please enter a Target Table(required)': 'Please enter a Target Table(required)',
'SpeedByte': 'speed(byte count)',
'SpeedRecord': 'speed(record count)',
'0 means unlimited by byte': '0 means unlimited',
'0 means unlimited by count': '0 means unlimited',
'Modify User': 'Modify User',
'Whether directory': 'Whether directory',
'Yes': 'Yes',
'No': 'No',
'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)',
'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)',
'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)',
'Please enter Target Dir(required)': 'Please enter Target Dir(required)',
'Please enter Export Dir(required)': 'Please enter Export Dir(required)',
  'Please enter Hive Database(required)': 'Please enter Hive Database(required)',
'Please enter Hive Table(required)': 'Please enter Hive Table(required)',
  'Please enter Hive Partition Keys': 'Please enter Hive Partition Keys',
  'Please enter Hive Partition Values': 'Please enter Hive Partition Values',
'Please enter Replace Delimiter': 'Please enter Replace Delimiter',
'Please enter Fields Terminated': 'Please enter Fields Terminated',
'Please enter Lines Terminated': 'Please enter Lines Terminated',
'Please enter Concurrency': 'Please enter Concurrency',
'Please enter Update Key': 'Please enter Update Key',
'Direct': 'Direct',
'Type': 'Type',
'ModelType': 'ModelType',
'ColumnType': 'ColumnType',
'Database': 'Database',
'Column': 'Column',
'Map Column Hive': 'Map Column Hive',
'Map Column Java': 'Map Column Java',
'Export Dir': 'Export Dir',
'Hive partition Keys': 'Hive partition Keys',
'Hive partition Values': 'Hive partition Values',
'FieldsTerminated': 'FieldsTerminated',
'LinesTerminated': 'LinesTerminated',
'IsUpdate': 'IsUpdate',
'UpdateKey': 'UpdateKey',
'UpdateMode': 'UpdateMode',
'Target Dir': 'Target Dir',
'DeleteTargetDir': 'DeleteTargetDir',
'FileType': 'FileType',
'CompressionCodec': 'CompressionCodec',
'CreateHiveTable': 'CreateHiveTable',
'DropDelimiter': 'DropDelimiter',
'OverWriteSrc': 'OverWriteSrc',
'ReplaceDelimiter': 'ReplaceDelimiter',
'Concurrency': 'Concurrency',
'Form': 'Form',
'OnlyUpdate': 'OnlyUpdate',
'AllowInsert': 'AllowInsert',
'Data Source': 'Data Source',
'Data Target': 'Data Target',
'All Columns': 'All Columns',
'Some Columns': 'Some Columns',
'Branch flow': 'Branch flow',
'Cannot select the same node for successful branch flow and failed branch flow': 'Cannot select the same node for successful branch flow and failed branch flow',
'Successful branch flow and failed branch flow are required': 'Successful branch flow and failed branch flow are required',
'Unauthorized or deleted resources': 'Unauthorized or deleted resources',
'Please delete all non-existent resources': 'Please delete all non-existent resources',
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,578 | [BUG]wrong description of work flow copy tips | ![image](https://user-images.githubusercontent.com/29528966/80588031-28815900-8a4a-11ea-8c41-595890f0c529.png)
version:
dev
i think the tips should be "work flow copy" | https://github.com/apache/dolphinscheduler/issues/2578 | https://github.com/apache/dolphinscheduler/pull/2612 | 6a92bef5e36dd6c7100517cfa516ca9c8603d950 | fadfb28187f48648230094e05f946d2b41e8f67a | "2020-04-29T10:50:58Z" | java | "2020-05-06T13:33:08Z" | dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export default {
'User Name': '用户名',
'Please enter user name': '请输入用户名',
'Password': '密码',
'Please enter your password': '请输入密码',
'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间',
'Login': '登录',
'Home': '首页',
'Failed to create node to save': '未创建节点保存失败',
'Global parameters': '全局参数',
'Local parameters': '局部参数',
'Copy success': '复制成功',
'The browser does not support automatic copying': '该浏览器不支持自动复制',
'Whether to save the DAG graph': '是否保存DAG图',
'Current node settings': '当前节点设置',
'View history': '查看历史',
'View log': '查看日志',
'Enter this child node': '进入该子节点',
'Node name': '节点名称',
'Please enter name(required)': '请输入名称(必填)',
'Run flag': '运行标志',
'Normal': '正常',
'Prohibition execution': '禁止执行',
'Please enter description': '请输入描述',
'Number of failed retries': '失败重试次数',
'Times': '次',
'Failed retry interval': '失败重试间隔',
'Minute': '分',
'Cancel': '取消',
'Confirm add': '确认添加',
'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流',
'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流',
'Please enter name (required)': '请输入名称(必填)',
'Name already exists': '名称已存在请重新输入',
'Download Log': '下载日志',
'Refresh Log': '刷新日志',
'Enter full screen': '进入全屏',
'Cancel full screen': '取消全屏',
'Close': '关闭',
'Update log success': '更新日志成功',
'No more logs': '暂无更多日志',
'No log': '暂无日志',
'Loading Log...': '正在努力请求日志中...',
'Set the DAG diagram name': '设置DAG图名称',
'Please enter description(optional)': '请输入描述(选填)',
'Set global': '设置全局',
'Whether to update the process definition': '是否更新流程定义',
'Add': '添加',
'DAG graph name cannot be empty': 'DAG图名称不能为空',
'Create Datasource': '创建数据源',
'Project Home': '项目首页',
'Project Manage': '项目管理',
'Create Project': '创建项目',
'Cron Manage': '定时管理',
'Tenant Manage': '租户管理',
'Create Tenant': '创建租户',
'User Manage': '用户管理',
'Create User': '创建用户',
'User Information': '用户信息',
'Edit Password': '密码修改',
'success': '成功',
'failed': '失败',
'delete': '删除',
'Please choose': '请选择',
'Please enter a positive integer': '请输入正整数',
'Program Type': '程序类型',
'Main class': '主函数的class',
'Main jar package': '主jar包',
'Please enter main jar package': '请选择主jar包',
'Command-line parameters': '命令行参数',
'Please enter Command-line parameters': '请输入命令行参数',
'Other parameters': '其他参数',
'Please enter other parameters': '请输入其他参数',
'Resources': '资源',
'Custom Parameters': '自定义参数',
'Custom template': '自定义模版',
'Please enter main class': '请填写主函数的class',
'Datasource': '数据源',
'methods': '方法',
'Please enter method(optional)': '请输入方法(选填)',
'Script': '脚本',
'Please enter script(required)': '请输入脚本(必填)',
'Deploy Mode': '部署方式',
'Driver core number': 'Driver内核数',
'Please enter driver core number': '请输入Driver内核数',
'Driver memory use': 'Driver内存数',
'Please enter driver memory use': '请输入Driver内存数',
'Number of Executors': 'Executor数量',
'Please enter the number of Executor': '请输入Executor数量',
'Executor memory': 'Executor内存数',
'Please enter the Executor memory': '请输入Executor内存数',
'Executor core number': 'Executor内核数',
'Please enter Executor core number': '请输入Executor内核数',
'The number of Executors should be a positive integer': 'Executor数量为正整数',
'Memory should be a positive integer': '内存数为数字',
'Please enter ExecutorPlease enter Executor core number': '请填写Executor内核数',
'Core number should be positive integer': '内核数为正整数',
'SQL Type': 'sql类型',
'Title': '主题',
'Please enter the title of email': '请输入邮件主题',
'Table': '表名',
'TableMode': '表格',
'Attachment': '附件',
'SQL Parameter': 'sql参数',
'SQL Statement': 'sql语句',
'UDF Function': 'UDF函数',
'FILE': '文件',
'UDF': 'UDF',
'File Subdirectory': '文件子目录',
'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
'Please enter a JSON Statement(required)': '请输入json语句(必填)',
'One form or attachment must be selected': '表格、附件必须勾选一个',
'Recipient required': '收件人邮箱必填',
'Mail subject required': '邮件主题必填',
'Child Node': '子节点',
'Please select a sub-Process': '请选择子工作流',
'Edit': '编辑',
'Datasource Name': '数据源名称',
'Please enter datasource name': '请输入数据源名称',
'IP': 'IP主机名',
'Please enter IP': '请输入IP主机名',
'Port': '端口',
'Please enter port': '请输入端口',
'Database Name': '数据库名',
'Please enter database name': '请输入数据库名',
'Oracle Connect Type': '服务名或SID',
'Oracle Service Name': '服务名',
'Oracle SID': 'SID',
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
'Please enter resource name': '请输入数据源名称',
'Please enter resource folder name': '请输入资源文件夹名称',
'Please enter a non-query SQL statement': '请输入非查询sql语句',
'Please enter IP/hostname': '请输入IP/主机名',
'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
'#': '编号',
'Datasource Type': '数据源类型',
'Datasource Parameter': '数据源参数',
'Create Time': '创建时间',
'Update Time': '更新时间',
'Operation': '操作',
'Click to view': '点击查看',
'Delete?': '确定删除吗?',
'Confirm': '确定',
'Task status statistics': '任务状态统计',
'Number': '数量',
'State': '状态',
'Process Status Statistics': '流程状态统计',
'Process Definition Statistics': '流程定义统计',
'Project Name': '项目名称',
'Please enter name': '请输入名称',
'Owned Users': '所属用户',
'Process Pid': '进程pid',
'Zk registration directory': 'zk注册目录',
'cpuUsage': 'cpuUsage',
'memoryUsage': 'memoryUsage',
'Last heartbeat time': '最后心跳时间',
'Edit Tenant': '编辑租户',
'Tenant Code': '租户编码',
'Tenant Name': '租户名称',
'Queue': '队列',
'Please enter the tenant code in English': '请输入租户编码只允许英文',
'Please enter tenant code in English': '请输入英文租户编码',
'Edit User': '编辑用户',
'Tenant': '租户',
'Email': '邮件',
'Phone': '手机',
'Please enter phone number': '请输入手机',
'Please enter email': '请输入邮箱',
'Please enter the correct email format': '请输入正确的邮箱格式',
'Please enter the correct mobile phone format': '请输入正确的手机格式',
'Project': '项目',
'Authorize': '授权',
'File resources': '文件资源',
'UDF resources': 'UDF资源',
'UDF resources directory': 'UDF资源目录',
'Please select UDF resources directory': '请选择UDF资源目录',
'Edit alarm group': '编辑告警组',
'Create alarm group': '创建告警组',
'Group Name': '组名称',
'Please enter group name': '请输入组名称',
'Group Type': '组类型',
'Remarks': '备注',
'SMS': '短信',
'Managing Users': '管理用户',
'Permission': '权限',
'Administrator': '管理员',
'Confirm Password': '确认密码',
'Please enter confirm password': '请输入确认密码',
'Password cannot be in Chinese': '密码不能为中文',
'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码',
'Confirmation password cannot be in Chinese': '确认密码不能为中文',
'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码',
'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认',
'Please select the datasource': '请选择数据源',
'Please select resources': '请选择资源',
'Query': '查询',
'Non Query': '非查询',
'prop(required)': 'prop(必填)',
'value(optional)': 'value(选填)',
'value(required)': 'value(必填)',
'prop is empty': 'prop不能为空',
'value is empty': 'value不能为空',
'prop is repeat': 'prop中有重复',
'Start Time': '开始时间',
'End Time': '结束时间',
'crontab': 'crontab',
'Failure Strategy': '失败策略',
'online': '上线',
'offline': '下线',
'Task Status': '任务状态',
'Process Instance': '工作流实例',
'Task Instance': '任务实例',
'Select date range': '选择日期区间',
'Date': '日期',
'waiting': '等待',
'execution': '执行中',
'finish': '完成',
'Create File': '创建文件',
'Create folder': '创建文件夹',
'File Name': '文件名称',
'Folder Name': '文件夹名称',
'File Format': '文件格式',
'Folder Format': '文件夹格式',
'File Content': '文件内容',
'Upload File Size': '文件大小不能超过1G',
'Create': '创建',
'Please enter the resource content': '请输入资源内容',
'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行',
'File Details': '文件详情',
'Download Details': '下载详情',
'Return': '返回',
'Save': '保存',
'File Manage': '文件管理',
'Upload Files': '上传文件',
'Create UDF Function': '创建UDF函数',
'Upload UDF Resources': '上传UDF资源',
'Service-Master': '服务管理-Master',
'Service-Worker': '服务管理-Worker',
'Process Name': '工作流名称',
'Executor': '执行用户',
'Run Type': '运行类型',
'Scheduling Time': '调度时间',
'Run Times': '运行次数',
'host': 'host',
'fault-tolerant sign': '容错标识',
'Rerun': '重跑',
'Recovery Failed': '恢复失败',
'Stop': '停止',
'Pause': '暂停',
'Recovery Suspend': '恢复运行',
'Gantt': '甘特图',
'Name': '名称',
'Node Type': '节点类型',
'Submit Time': '提交时间',
'Duration': '运行时长',
'Retry Count': '重试次数',
'Task Name': '任务名称',
'Task Date': '任务日期',
'Source Table': '源表',
'Record Number': '记录数',
'Target Table': '目标表',
'Online viewing type is not supported': '不支持在线查看类型',
'Size': '大小',
'Rename': '重命名',
'Download': '下载',
'Export': '导出',
'Submit': '提交',
'Edit UDF Function': '编辑UDF函数',
'type': '类型',
'UDF Function Name': 'UDF函数名称',
'Please enter a function name': '请输入函数名',
'Package Name': '包名类名',
'Please enter a Package name': '请输入包名类名',
'Parameter': '参数',
'Please enter a parameter': '请输入参数',
'UDF Resources': 'UDF资源',
'Upload Resources': '上传资源',
'Instructions': '使用说明',
'Please enter a instructions': '请输入使用说明',
'Please enter a UDF function name': '请输入UDF函数名称',
'Select UDF Resources': '请选择UDF资源',
'Class Name': '类名',
'Jar Package': 'jar包',
'Library Name': '库名',
'UDF Resource Name': 'UDF资源名称',
'File Size': '文件大小',
'Description': '描述',
'Drag Nodes and Selected Items': '拖动节点和选中项',
'Select Line Connection': '选择线条连接',
'Delete selected lines or nodes': '删除选中的线或节点',
'Full Screen': '全屏',
'Unpublished': '未发布',
'Start Process': '启动工作流',
'Execute from the current node': '从当前节点开始执行',
'Recover tolerance fault process': '恢复被容错的工作流',
'Resume the suspension process': '恢复运行流程',
'Execute from the failed nodes': '从失败节点开始执行',
'Complement Data': '补数',
'Scheduling execution': '调度执行',
'Recovery waiting thread': '恢复等待线程',
'Submitted successfully': '提交成功',
'Executing': '正在执行',
'Ready to pause': '准备暂停',
'Ready to stop': '准备停止',
'Need fault tolerance': '需要容错',
'kill': 'kill',
'Waiting for thread': '等待线程',
'Waiting for dependence': '等待依赖',
'Start': '运行',
'Copy': '复制节点',
'Copy name': '复制名称',
'Delete': '删除',
'Please enter keyword': '请输入关键词',
'File Upload': '文件上传',
'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!',
'Drag area upload': '拖动区域上传',
'Upload': '上传',
'Please enter file name': '请输入文件名',
'Please select the file to upload': '请选择要上传的文件',
'Resources manage': '资源中心',
'Security': '安全中心',
'Logout': '退出',
'No data': '查询无数据',
'Uploading...': '文件上传中',
'Loading...': '正在努力加载中...',
'List': '列表',
'Unable to download without proper url': '无下载url无法下载',
'Process': '工作流',
'Process definition': '工作流定义',
'Task record': '任务记录',
'Warning group manage': '告警组管理',
'Servers manage': '服务管理',
'UDF manage': 'UDF管理',
'Resource manage': '资源管理',
'Function manage': '函数管理',
'Edit password': '修改密码',
'Ordinary users': '普通用户',
'Create process': '创建工作流',
'Import process': '导入工作流',
'Timing state': '定时状态',
'Timing': '定时',
'TreeView': '树形图',
'Mailbox already exists! Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复',
'Mailbox input is illegal': '邮箱输入不合法',
'Please set the parameters before starting': '启动前请先设置参数',
'Continue': '继续',
'End': '结束',
'Node execution': '节点执行',
'Backward execution': '向后执行',
'Forward execution': '向前执行',
'Execute only the current node': '仅执行当前节点',
'Notification strategy': '通知策略',
'Notification group': '通知组',
'Please select a notification group': '请选择通知组',
'Recipient': '收件人',
'Cc': '抄送人',
'Whether it is a complement process?': '是否补数',
'Schedule date': '调度日期',
'Mode of execution': '执行方式',
'Serial execution': '串行执行',
'Parallel execution': '并行执行',
'Set parameters before timing': '定时前请先设置参数',
'Start and stop time': '起止时间',
'Please select time': '请选择时间',
'Please enter crontab': '请输入crontab',
'none_1': '都不发',
'success_1': '成功发',
'failure_1': '失败发',
'All_1': '成功或失败都发',
'Toolbar': '工具栏',
'View variables': '查看变量',
'Format DAG': '格式化DAG',
'Refresh DAG status': '刷新DAG状态',
'Return_1': '返回上一节点',
'Please enter format': '请输入格式为',
'connection parameter': '连接参数',
'Process definition details': '流程定义详情',
'Create process definition': '创建流程定义',
'Scheduled task list': '定时任务列表',
'Process instance details': '流程实例详情',
'Create Resource': '创建资源',
'User Center': '用户中心',
'Please enter method': '请输入方法',
'none': '无',
'name': '名称',
'Process priority': '流程优先级',
'Task priority': '任务优先级',
'Task timeout alarm': '任务超时告警',
'Timeout strategy': '超时策略',
'Timeout alarm': '超时告警',
'Timeout failure': '超时失败',
'Timeout period': '超时时长',
'Timeout strategy must be selected': '超时策略必须选一个',
'Timeout must be a positive integer': '超时时长必须为正整数',
'Add dependency': '添加依赖',
'and': '且',
'or': '或',
'month': '月',
'week': '周',
'day': '日',
'hour': '时',
'Running': '正在运行',
'Waiting for dependency to complete': '等待依赖完成',
'Selected': '已选',
'Last1Hour': '前1小时',
'Last2Hours': '前2小时',
'Last3Hours': '前3小时',
'today': '今天',
'Last1Days': '昨天',
'Last2Days': '前两天',
'Last3Days': '前三天',
'Last7Days': '前七天',
'ThisWeek': '本周',
'LastWeek': '上周',
'LastMonday': '上周一',
'LastTuesday': '上周二',
'LastWednesday': '上周三',
'LastThursday': '上周四',
'LastFriday': '上周五',
'LastSaturday': '上周六',
'LastSunday': '上周日',
'ThisMonth': '本月',
'LastMonth': '上月',
'LastMonthBegin': '上月初',
'LastMonthEnd': '上月末',
'Refresh status succeeded': '刷新状态成功',
'Queue manage': '队列管理',
'Create queue': '创建队列',
'Edit queue': '编辑队列',
'Datasource manage': '数据源中心',
'History task record': '历史任务记录',
'Please go online': '不要忘记上线',
'Queue value': '队列值',
'Please enter queue value': '请输入队列值',
'Worker group manage': 'Worker分组管理',
'Create worker group': '创建Worker分组',
'Edit worker group': '编辑Worker分组',
'Token manage': '令牌管理',
'Create token': '创建令牌',
'Edit token': '编辑令牌',
'Please enter the IP address separated by commas': '请输入IP地址多个用英文逗号隔开',
'Note: Multiple IP addresses have been comma separated': '注意:多个IP地址以英文逗号分割',
'Failure time': '失效时间',
'User': '用户',
'Please enter token': '请输入令牌',
'Generate token': '生成令牌',
'Monitor': '监控中心',
'Group': '分组',
'Queue statistics': '队列统计',
'Command status statistics': '命令状态统计',
'Task kill': '等待kill任务',
'Task queue': '等待执行任务',
'Error command count': '错误指令数',
'Normal command count': '正确指令数',
'Manage': '管理',
'Number of connections': '连接数',
'Sent': '发送量',
'Received': '接收量',
'Min latency': '最低延时',
'Avg latency': '平均延时',
'Max latency': '最大延时',
'Node count': '节点数',
'Query time': '当前查询时间',
'Node self-test status': '节点自检状态',
'Health status': '健康状态',
'Max connections': '最大连接数',
'Threads connections': '当前连接数',
'Max used connections': '同时使用连接最大数',
'Threads running connections': '数据库当前活跃连接数',
'Worker group': 'Worker分组',
'Please enter a positive integer greater than 0': '请输入大于 0 的正整数',
'Pre Statement': '前置sql',
'Post Statement': '后置sql',
'Statement cannot be empty': '语句不能为空',
'Process Define Count': '工作流定义数',
'Process Instance Running Count': '正在运行的流程数',
'Please select a queue': '请选择队列',
'command number of waiting for running': '待执行的命令数',
'failure command number': '执行失败的命令数',
'tasks number of waiting running': '待运行任务数',
'task number of ready to kill': '待杀死任务数',
'Statistics manage': '统计管理',
'statistics': '统计',
'select tenant':'选择租户',
'Please enter Principal':'请输入Principal',
'The start time must not be the same as the end': '开始时间和结束时间不能相同',
'Startup parameter': '启动参数',
'Startup type': '启动类型',
'warning of timeout': '超时告警',
'Next five execution times': '接下来五次执行时间',
'Execute time': '执行时间',
'Complement range': '补数范围',
'slot':'slot数量',
'taskManager':'taskManage数量',
'jobManagerMemory':'jobManager内存数',
'taskManagerMemory':'taskManager内存数',
'Http Url':'请求地址',
'Http Method':'请求类型',
'Http Parameters':'请求参数',
'Http Parameters Key':'参数名',
'Http Parameters Position':'参数位置',
'Http Parameters Value':'参数值',
'Http Check Condition':'校验条件',
'Http Condition':'校验内容',
'Please Enter Http Url': '请填写请求地址(必填)',
'Please Enter Http Condition': '请填写校验内容',
'There is no data for this period of time': '该时间段无数据',
'IP address cannot be empty': 'IP地址不能为空',
'Please enter the correct IP': '请输入正确的IP',
'Please generate token': '请生成Token',
'Spark Version': 'Spark版本',
'TargetDataBase': '目标库',
'TargetTable': '目标表',
'Please enter the table of target': '请输入目标表名',
'Please enter a Target Table(required)': '请输入目标表(必填)',
'SpeedByte': '限流(字节数)',
'SpeedRecord': '限流(记录数)',
'0 means unlimited by byte': 'KB,0代表不限制',
'0 means unlimited by count': '0代表不限制',
'Modify User': '修改用户',
'Whether directory' : '是否文件夹',
'Yes': '是',
'No': '否',
'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
'Please enter Target Dir(required)': '请输入目标路径(必填)',
'Please enter Export Dir(required)': '请输入数据源路径(必填)',
'Please enter Hive Database(required)': '请输入Hive数据库(必填)',
'Please enter Hive Table(required)': '请输入Hive表名(必填)',
'Please enter Hive Partition Keys': '请输入分区键',
'Please enter Hive Partition Values': '请输入分区值',
'Please enter Replace Delimiter': '请输入替换分隔符',
'Please enter Fields Terminated': '请输入列分隔符',
'Please enter Lines Terminated': '请输入行分隔符',
'Please enter Concurrency': '请输入并发度',
'Please enter Update Key': '请输入更新列',
'Direct': '流向',
'Type': '类型',
'ModelType': '模式',
'ColumnType': '列类型',
'Database': '数据库',
'Column': '列',
'Map Column Hive': 'Hive类型映射',
'Map Column Java': 'Java类型映射',
'Export Dir': '数据源路径',
'Hive partition Keys': 'Hive 分区键',
'Hive partition Values': 'Hive 分区值',
'FieldsTerminated': '列分隔符',
'LinesTerminated': '行分隔符',
'IsUpdate': '是否更新',
'UpdateKey': '更新列',
'UpdateMode': '更新类型',
'Target Dir': '目标路径',
'DeleteTargetDir': '是否删除目录',
'FileType': '保存格式',
'CompressionCodec': '压缩类型',
'CreateHiveTable': '是否创建新表',
'DropDelimiter': '是否删除分隔符',
'OverWriteSrc': '是否覆盖数据源',
'ReplaceDelimiter': '替换分隔符',
'Concurrency': '并发度',
'Form': '表单',
'OnlyUpdate': '只更新',
'AllowInsert': '无更新便插入',
'Data Source': '数据来源',
'Data Target': '数据目的',
'All Columns': '全表导入',
'Some Columns': '选择列',
'Branch flow': '分支流转',
'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点',
'Successful branch flow and failed branch flow are required': '成功分支流转和失败分支流转必填',
'Unauthorized or deleted resources': '未授权或已删除资源',
'Please delete all non-existent resources': '请删除所有未授权或已删除资源',
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,481 | [BUG] the Mysql needs to configure with“allowMultiQueries = true”, | 1. Since MySQL does not support batch upload, the data source needs to be configured with “allowMultiQueries = true”, otherwise an error will occur when deleting resource files. The MySQL configuration is as follows:
spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
2. It is recommended to make the database name configurable.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/2481 | https://github.com/apache/dolphinscheduler/pull/2573 | b5dbe94f07166d7dfd4dad531c2763bc39951815 | 632b2b503a7d4fc4d9a24961d5e4139b8373ccbc | "2020-04-21T03:57:53Z" | java | "2020-05-07T01:48:05Z" | dolphinscheduler-server/src/main/resources/config/install_config.conf | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTICE : If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`
# postgresql or mysql
dbtype="mysql"
# db config
# db address and port
dbhost="192.168.xx.xx:3306"
# db username
username="xx"
# db password
# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
password="xx"
# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
installPath="/data1_1T/dolphinscheduler"
# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"
# alert config
# mail server host
mailServerHost="smtp.exmail.qq.com"
# mail server port
# note: Different protocols and encryption methods correspond to different ports, when SSL/TLS is enabled, make sure the port is correct.
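# common defaults: 25 for plain SMTP, 465 for implicit SSL (SMTPS), 587 for STARTTLS submission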
mailServerPort="25"
# sender
mailSender="xxxxxxxxxx"
# user
mailUser="xxxxxxxxxx"
# sender password
# note: The mail.passwd is email service authorization code, not the email login password.
mailPassword="xxxxxxxxxx"
# TLS mail protocol support
starttlsEnable="false"
#note: sslTrust is the same as mailServerHost
sslTrust="smtp.exmail.qq.com"
# SSL mail protocol support
# note: The SSL protocol is enabled by default.
# only one of TLS and SSL can be in the true state.
sslEnable="true"
# resource storage type:HDFS,S3,NONE
resourceStorageType="NONE"
# if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
# Note,s3 be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"
# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"
# if not use hadoop resourcemanager, please keep default value; if resourcemanager HA enable, please type the HA ips ; if resourcemanager is single, make this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"
# if resourcemanager HA enable or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ark1 to actual resourcemanager hostname.
singleYarnIp="ark1"
# resource storage path on HDFS/S3: resource files will be stored under this hadoop hdfs path. Configure it yourself and make sure the directory exists on hdfs and has read/write permissions. /dolphinscheduler is recommended
resourceUploadPath="/dolphinscheduler"
# who have permissions to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hdfs"
# kerberos config
# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="[email protected]"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# api server port
apiServerPort="12345"
# install hosts
# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
ips="ark0,ark1,ark2,ark3,ark4"
# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort="22"
# run master machine
# Note: list of hosts hostname for deploying master
masters="ark0,ark1"
# run worker machine
# note: list of machine hostnames for deploying workers
workers="ark2,ark3,ark4"
# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ark3"
# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ark1"
# whether to start monitoring self-starting scripts
monitorServerState="false"
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,481 | [BUG] the Mysql needs to configure with“allowMultiQueries = true”, | 1. Since MySQL does not support batch upload, the data source needs to be configured with “allowMultiQueries = true”, otherwise an error will occur when deleting resource files. The MySQL configuration is as follows:
spring.datasource.url=jdbc:mysql://192.168.xx.xx:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true
2. It is recommended to make the database name configurable.
**Which version of Dolphin Scheduler:**
-[dev]
| https://github.com/apache/dolphinscheduler/issues/2481 | https://github.com/apache/dolphinscheduler/pull/2573 | b5dbe94f07166d7dfd4dad531c2763bc39951815 | 632b2b503a7d4fc4d9a24961d5e4139b8373ccbc | "2020-04-21T03:57:53Z" | java | "2020-05-07T01:48:05Z" | install.sh | #!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
workDir=`dirname $0`
workDir=`cd ${workDir};pwd`
source ${workDir}/conf/config/install_config.conf
# 1.replace file
echo "1.replace file"
txt=""
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
txt="''"
fi
datasourceDriverClassname="com.mysql.jdbc.Driver"
if [ $dbtype == "postgresql" ];then
datasourceDriverClassname="org.postgresql.Driver"
fi
sed -i ${txt} "s#spring.datasource.driver-class-name.*#spring.datasource.driver-class-name=${datasourceDriverClassname}#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.url.*#spring.datasource.url=jdbc:${dbtype}://${dbhost}/dolphinscheduler?characterEncoding=UTF-8#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.username.*#spring.datasource.username=${username}#g" conf/datasource.properties
sed -i ${txt} "s#spring.datasource.password.*#spring.datasource.password=${password}#g" conf/datasource.properties
sed -i ${txt} "s#fs.defaultFS.*#fs.defaultFS=${defaultFS}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.endpoint.*#fs.s3a.endpoint=${s3Endpoint}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.access.key.*#fs.s3a.access.key=${s3AccessKey}#g" conf/common.properties
sed -i ${txt} "s#fs.s3a.secret.key.*#fs.s3a.secret.key=${s3SecretKey}#g" conf/common.properties
sed -i ${txt} "s#yarn.resourcemanager.ha.rm.ids.*#yarn.resourcemanager.ha.rm.ids=${yarnHaIps}#g" conf/common.properties
sed -i ${txt} "s#yarn.application.status.address.*#yarn.application.status.address=http://${singleYarnIp}:8088/ws/v1/cluster/apps/%s#g" conf/common.properties
sed -i ${txt} "s#hdfs.root.user.*#hdfs.root.user=${hdfsRootUser}#g" conf/common.properties
sed -i ${txt} "s#resource.upload.path.*#resource.upload.path=${resourceUploadPath}#g" conf/common.properties
sed -i ${txt} "s#resource.storage.type.*#resource.storage.type=${resourceStorageType}#g" conf/common.properties
sed -i ${txt} "s#hadoop.security.authentication.startup.state.*#hadoop.security.authentication.startup.state=${kerberosStartUp}#g" conf/common.properties
sed -i ${txt} "s#java.security.krb5.conf.path.*#java.security.krb5.conf.path=${krb5ConfPath}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.username.*#login.user.keytab.username=${keytabUserName}#g" conf/common.properties
sed -i ${txt} "s#login.user.keytab.path.*#login.user.keytab.path=${keytabPath}#g" conf/common.properties
sed -i ${txt} "s#zookeeper.quorum.*#zookeeper.quorum=${zkQuorum}#g" conf/zookeeper.properties
sed -i ${txt} "s#server.port.*#server.port=${apiServerPort}#g" conf/application-api.properties
sed -i ${txt} "s#mail.server.host.*#mail.server.host=${mailServerHost}#g" conf/alert.properties
sed -i ${txt} "s#mail.server.port.*#mail.server.port=${mailServerPort}#g" conf/alert.properties
sed -i ${txt} "s#mail.sender.*#mail.sender=${mailSender}#g" conf/alert.properties
sed -i ${txt} "s#mail.user.*#mail.user=${mailUser}#g" conf/alert.properties
sed -i ${txt} "s#mail.passwd.*#mail.passwd=${mailPassword}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.starttls.enable.*#mail.smtp.starttls.enable=${starttlsEnable}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.ssl.trust.*#mail.smtp.ssl.trust=${sslTrust}#g" conf/alert.properties
sed -i ${txt} "s#mail.smtp.ssl.enable.*#mail.smtp.ssl.enable=${sslEnable}#g" conf/alert.properties
# 2.create directory
echo "2.create directory"
if [ ! -d $installPath ];then
sudo mkdir -p $installPath
sudo chown -R $deployUser:$deployUser $installPath
fi
# 3.scp resources
echo "3.scp resources"
sh ${workDir}/script/scp-hosts.sh
if [ $? -eq 0 ]
then
echo 'scp copy completed'
else
echo 'scp copy failed to exit'
exit 1
fi
# 4.stop server
echo "4.stop server"
sh ${workDir}/script/stop-all.sh
# 5.delete zk node
echo "5.delete zk node"
sh ${workDir}/script/remove-zk-node.sh $zkRoot
# 6.startup
echo "6.startup"
sh ${workDir}/script/start-all.sh |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,545 | [BUG] The create user is always loading when the queue does not select any value | **The Create User dialog keeps loading when no queue value is selected**
Prerequisite: the queue field is not required to be filled in.
**Steps to reproduce the issue**
1. Click Create User
2. Fill in user name, password, tenant, email and mobile phone number correctly
3. Leave only the queue empty
4. Submit
5. Error: the dialog displays "loading" all the time
**Expected result**
The user should be created successfully when the queue is left empty, so that the tenant's queue is used (a user's own queue takes precedence over the tenant's queue).
**DolphinScheduler1.2.0**
**Screenshots**
![1587983909(1)](https://user-images.githubusercontent.com/51871547/80363158-63eb1e80-88b6-11ea-83cc-dc37183f4a76.jpg)
| https://github.com/apache/dolphinscheduler/issues/2545 | https://github.com/apache/dolphinscheduler/pull/2558 | 632b2b503a7d4fc4d9a24961d5e4139b8373ccbc | 697ea0f65625995a07b03ef892cf0f841b8926dd | "2020-04-27T10:39:35Z" | java | "2020-05-07T03:51:42Z" | dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
<template>
<m-popup
ref="popup"
:ok-text="item ? $t('Edit') : $t('Submit')"
:nameText="item ? $t('Edit User') : $t('Create User')"
@ok="_ok">
<template slot="content">
<div class="create-user-model">
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('User Name')}}</template>
<template slot="content">
<x-input
type="input"
v-model="userName"
maxlength="60"
:placeholder="$t('Please enter user name')">
</x-input>
</template>
</m-list-box-f>
<m-list-box-f v-if="router.history.current.name !== 'account'">
<template slot="name"><strong>*</strong>{{$t('Password')}}</template>
<template slot="content">
<x-input
type="password"
v-model="userPassword"
:placeholder="$t('Please enter your password')">
</x-input>
</template>
</m-list-box-f>
<m-list-box-f v-if="isADMIN">
<template slot="name"><strong>*</strong>{{$t('Tenant')}}</template>
<template slot="content">
<x-select v-model="tenantId">
<x-option
v-for="city in tenantList"
:key="city.id"
:value="city.id"
:label="city.code">
</x-option>
</x-select>
</template>
</m-list-box-f>
<m-list-box-f v-if="isADMIN">
<template slot="name">{{$t('Queue')}}</template>
<template slot="content">
<x-select v-model="queueName">
<x-input slot="trigger" slot-scope="{ selectedModel }" readonly :placeholder="$t('Please select a queue')" :value="selectedModel ? selectedModel.label : ''" style="width: 200px;" @on-click-icon.stop="queueName = ''">
<em slot="suffix" class="ans-icon-fail-solid" style="font-size: 15px;cursor: pointer;" v-show="queueName ==''"></em>
<em slot="suffix" class="ans-icon-arrow-down" style="font-size: 12px;" v-show="queueName!=''"></em>
</x-input>
<x-option
v-for="city in queueList"
:key="city.id"
:value="city.id"
:label="city.code">
</x-option>
</x-select>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name"><strong>*</strong>{{$t('Email')}}</template>
<template slot="content">
<x-input
type="input"
v-model="email"
:placeholder="$t('Please enter email')">
</x-input>
</template>
</m-list-box-f>
<m-list-box-f>
<template slot="name">{{$t('Phone')}}</template>
<template slot="content">
<x-input
type="input"
v-model="phone"
:placeholder="$t('Please enter phone number')">
</x-input>
</template>
</m-list-box-f>
</div>
</template>
</m-popup>
</template>
<script>
import _ from 'lodash'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
import router from '@/conf/home/router'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
export default {
name: 'create-user',
data () {
return {
store,
router,
queueList: [],
userName: '',
userPassword: '',
tenantId: '',
queueName: '',
email: '',
phone: '',
tenantList: [],
// Source admin user information
isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER' && router.history.current.name !== 'account'
}
},
props: {
item: Object
},
methods: {
_ok () {
if (this._verification()) {
// The name is not verified
if (this.item && this.item.groupName === this.groupName) {
this._submit()
return
}
// Verify username
this.store.dispatch(`security/verifyName`, {
type: 'user',
userName: this.userName
}).then(res => {
this._submit()
}).catch(e => {
this.$message.error(e.msg || '')
})
}
},
_verification () {
let regEmail = /^([a-zA-Z0-9]+[_|\-|\.]?)*[a-zA-Z0-9]+@([a-zA-Z0-9]+[_|\-|\.]?)*[a-zA-Z0-9]+\.[a-zA-Z]{2,}$/ // eslint-disable-line
// Mobile phone number regular
let regPhone = /^1(3|4|5|6|7|8)\d{9}$/; // eslint-disable-line
let regPassword = /^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)(?![`~!@#$%^&*()_\-+=<>?:"{}|,.\/;'\\[\]·~!@#¥%……&*()——\-+={}|《》?:“”【】、;‘’,。、]+$)[`~!@#$%^&*()_\-+=<>?:"{}|,.\/;'\\[\]·~!@#¥%……&*()——\-+={}|《》?:“”【】、;‘’,。、0-9A-Za-z]{6,22}$/;
// user name
if (!this.userName.replace(/\s*/g,"")) {
this.$message.warning(`${i18n.$t('Please enter user name')}`)
return false
}
// password
if (this.userPassword!='' && this.item) {
if(!regPassword.test(this.userPassword)) {
this.$message.warning(`${i18n.$t('Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22')}`)
return false
}
} else if(!this.item){
if(!regPassword.test(this.userPassword)) {
this.$message.warning(`${i18n.$t('Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22')}`)
return false
}
}
// email
if (!this.email) {
this.$message.warning(`${i18n.$t('Please enter email')}`)
return false
}
// Verify email
if (!regEmail.test(this.email)) {
this.$message.warning(`${i18n.$t('Please enter the correct email format')}`)
return false
}
// Verify phone
if (this.phone) {
if (!regPhone.test(this.phone)) {
this.$message.warning(`${i18n.$t('Please enter the correct mobile phone format')}`)
return false
}
}
return true
},
_getQueueList () {
return new Promise((resolve, reject) => {
this.store.dispatch('security/getQueueList').then(res => {
this.queueList = _.map(res, v => {
return {
id: v.id,
code: v.queueName
}
})
this.$nextTick(() => {
this.queueName = this.queueList[0].id
})
resolve()
})
})
},
_getTenantList () {
return new Promise((resolve, reject) => {
this.store.dispatch('security/getTenantList').then(res => {
let arr = _.filter(res, (o) => {
return o.id !== -1
})
this.tenantList = _.map(arr, v => {
return {
id: v.id,
code: v.tenantName
}
})
this.$nextTick(() => {
this.tenantId = this.tenantList[0].id
})
resolve()
})
})
},
_submit () {
this.$refs['popup'].spinnerLoading = true
let param = {
userName: this.userName,
userPassword: this.userPassword,
tenantId: this.tenantId,
email: this.email,
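// Note (issue #2545): when the queue selection is cleared, `queueName` is '' while
// `queueList` is still non-empty, so the `_.find` below returns undefined and the
// `.code` access throws before the dispatch runs, leaving spinnerLoading stuck at true.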
queue: this.queueList.length>0? _.find(this.queueList, ['id', this.queueName]).code : '',
phone: this.phone
}
if (this.item) {
param.id = this.item.id
}
this.store.dispatch(`security/${this.item ? 'updateUser' : 'createUser'}`, param).then(res => {
setTimeout(() => {
this.$refs['popup'].spinnerLoading = false
}, 800)
this.$emit('onUpdate', param)
this.$message.success(res.msg)
}).catch(e => {
this.$message.error(e.msg || '')
this.$refs['popup'].spinnerLoading = false
})
}
},
watch: {},
created () {
// Administrator gets tenant list
if (this.isADMIN) {
Promise.all([this._getQueueList(), this._getTenantList()]).then(() => {
if (this.item) {
this.userName = this.item.userName
this.userPassword = ''
this.email = this.item.email
this.phone = this.item.phone
this.tenantId = this.item.tenantId
this.$nextTick(() => {
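// Note: if the user's stored queue is empty or no longer present in queueList, `_.find`
// returns undefined and the `.id` access throws; the trailing `|| ''` does not guard
// against that because the property access happens first.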
this.queueName = _.find(this.queueList, ['code', this.item.queue]).id||''
})
}
})
} else {
if (this.item) {
this.userName = this.item.userName
this.userPassword = ''
this.email = this.item.email
this.phone = this.item.phone
this.tenantId = this.item.tenantId
if(this.queueList.length>0) {
this.queueName = _.find(this.queueList, ['code', this.item.queue]).id
} else {
this.queueName = ''
}
}
}
},
mounted () {
},
components: { mPopup, mListBoxF }
}
</script>
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,559 | [Feature]unique processdefinition name in the same project | **branch**
dev
**Is your feature request related to a problem? Please describe.**
If several process definitions in one project share the same name, it is hard to tell which is which, and duplicate names are usually created by mistake.
**Describe the solution you'd like**
Enforce a unique process definition name within one project.
**Additional context**
name suffix is also good | https://github.com/apache/dolphinscheduler/issues/2559 | https://github.com/apache/dolphinscheduler/pull/2617 | 33658d57fd9149cc8321e53683eeb756076fe7d7 | 6127f3a4609cda2937f47432e5c2c2ddb75c4452 | "2020-04-28T10:34:05Z" | java | "2020-05-07T09:18:42Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONArray;
import com.alibaba.fastjson.JSONObject;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.fasterxml.jackson.core.JsonProcessingException;
import org.apache.dolphinscheduler.api.dto.ProcessMeta;
import org.apache.dolphinscheduler.api.dto.treeview.Instance;
import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.FileUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.exportprocess.ProcessAddTaskParam;
import org.apache.dolphinscheduler.api.utils.exportprocess.TaskNodeParamFactory;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.service.permission.PermissionCheck;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletResponse;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID;
/**
* process definition service
*/
@Service
public class ProcessDefinitionService extends BaseDAGService {
private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionService.class);
@Autowired
private ProjectMapper projectMapper;
@Autowired
private ProjectService projectService;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private ProcessService processService;
@Autowired
private WorkerGroupMapper workerGroupMapper;
/**
* create process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return create result code
* @throws JsonProcessingException JsonProcessingException
*/
public Map<String, Object> createProcessDefinition(User loginUser,
String projectName,
String name,
String processDefinitionJson,
String desc,
String locations,
String connects) throws JsonProcessingException {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
// check project auth
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefine = new ProcessDefinition();
Date now = new Date();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if (checkProcessJson.get(Constants.STATUS) != Status.SUCCESS) {
return checkProcessJson;
}
processDefine.setName(name);
processDefine.setReleaseState(ReleaseState.OFFLINE);
processDefine.setProjectId(project.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(processDefinitionJson);
processDefine.setDescription(desc);
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = processData.getGlobalParams();
if (CollectionUtils.isNotEmpty(globalParamsList)) {
Set<Property> globalParamsSet = new HashSet<>(globalParamsList);
globalParamsList = new ArrayList<>(globalParamsSet);
processDefine.setGlobalParamList(globalParamsList);
}
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
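// Note (issue #2559): no duplicate-name check against existing definitions in this project
// is performed before the insert below; verifyProcessDefinitionName exists in this class
// but is not invoked from here, so identically named definitions can be created.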
processDefineMapper.insert(processDefine);
putMsg(result, Status.SUCCESS);
result.put("processDefinitionId",processDefine.getId());
return result;
}
/**
* get resource ids
* @param processData process data
* @return resource ids
*/
private String getResourceIds(ProcessData processData) {
List<TaskNode> tasks = processData.getTasks();
Set<Integer> resourceIds = new HashSet<>();
for(TaskNode taskNode : tasks){
String taskParameter = taskNode.getParams();
AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
if (CollectionUtils.isNotEmpty(params.getResourceFilesList())) {
Set<Integer> tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
resourceIds.addAll(tempSet);
}
}
StringBuilder sb = new StringBuilder();
for(int i : resourceIds) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(i);
}
return sb.toString();
}
/**
* query process definition list
*
* @param loginUser login user
* @param projectName project name
* @return definition list
*/
public Map<String, Object> queryProcessDefinitionList(User loginUser, String projectName) {
HashMap<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
List<ProcessDefinition> resourceList = processDefineMapper.queryAllDefinitionList(project.getId());
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition list paging
*
* @param loginUser login user
* @param projectName project name
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @param userId user id
* @return process definition page
*/
public Map<String, Object> queryProcessDefinitionListPaging(User loginUser, String projectName, String searchVal, Integer pageNo, Integer pageSize, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
Page<ProcessDefinition> page = new Page(pageNo, pageSize);
IPage<ProcessDefinition> processDefinitionIPage = processDefineMapper.queryDefineListPaging(
page, searchVal, userId, project.getId(),isAdmin(loginUser));
PageInfo pageInfo = new PageInfo<ProcessData>(pageNo, pageSize);
pageInfo.setTotalCount((int)processDefinitionIPage.getTotal());
pageInfo.setLists(processDefinitionIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query detail of process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return process definition detail
*/
public Map<String, Object> queryProcessDefinitionById(User loginUser, String projectName, Integer processId) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processId);
} else {
result.put(Constants.DATA_LIST, processDefinition);
putMsg(result, Status.SUCCESS);
}
return result;
}
/**
* copy process definition
*
* @param loginUser login user
* @param projectName project name
* @param processId process definition id
* @return copy result code
*/
public Map<String, Object> copyProcessDefinition(User loginUser, String projectName, Integer processId) throws JsonProcessingException{
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
return result;
} else {
return createProcessDefinition(
loginUser,
projectName,
processDefinition.getName()+"_copy_"+System.currentTimeMillis(),
processDefinition.getProcessDefinitionJson(),
processDefinition.getDescription(),
processDefinition.getLocations(),
processDefinition.getConnects());
}
}
/**
* update process definition
*
* @param loginUser login user
* @param projectName project name
* @param name process definition name
* @param id process definition id
* @param processDefinitionJson process definition json
* @param desc description
* @param locations locations for nodes
* @param connects connects for nodes
* @return update result code
*/
public Map<String, Object> updateProcessDefinition(User loginUser, String projectName, int id, String name,
String processDefinitionJson, String desc,
String locations, String connects) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if (resultStatus != Status.SUCCESS) {
return checkResult;
}
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
Map<String, Object> checkProcessJson = checkProcessNodeList(processData, processDefinitionJson);
if ((checkProcessJson.get(Constants.STATUS) != Status.SUCCESS)) {
return checkProcessJson;
}
ProcessDefinition processDefine = processService.findProcessDefineById(id);
if (processDefine == null) {
// check process definition exists
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, id);
return result;
} else if (processDefine.getReleaseState() == ReleaseState.ONLINE) {
// online can not permit edit
putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefine.getName());
return result;
} else {
putMsg(result, Status.SUCCESS);
}
Date now = new Date();
processDefine.setId(id);
processDefine.setName(name);
processDefine.setReleaseState(ReleaseState.OFFLINE);
processDefine.setProjectId(project.getId());
processDefine.setProcessDefinitionJson(processDefinitionJson);
processDefine.setDescription(desc);
processDefine.setLocations(locations);
processDefine.setConnects(connects);
processDefine.setTimeout(processData.getTimeout());
processDefine.setTenantId(processData.getTenantId());
processDefine.setModifyBy(loginUser.getUserName());
processDefine.setResourceIds(getResourceIds(processData));
//custom global params
List<Property> globalParamsList = new ArrayList<>();
if (CollectionUtils.isNotEmpty(processData.getGlobalParams())) {
Set<Property> userDefParamsSet = new HashSet<>(processData.getGlobalParams());
globalParamsList = new ArrayList<>(userDefParamsSet);
}
processDefine.setGlobalParamList(globalParamsList);
processDefine.setUpdateTime(now);
processDefine.setFlag(Flag.YES);
if (processDefineMapper.updateById(processDefine) > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
}
return result;
}
/**
* verify process definition name unique
*
* @param loginUser login user
* @param projectName project name
* @param name name
* @return true if process definition name not exists, otherwise false
*/
public Map<String, Object> verifyProcessDefinitionName(User loginUser, String projectName, String name) {
Map<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.queryByDefineName(project.getId(), name);
if (processDefinition == null) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.PROCESS_INSTANCE_EXIST, name);
}
return result;
}
/**
* delete process definition by id
*
* @param loginUser login user
* @param projectName project name
* @param processDefinitionId process definition id
* @return delete result code
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> deleteProcessDefinitionById(User loginUser, String projectName, Integer processDefinitionId) {
Map<String, Object> result = new HashMap<>(5);
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(processDefinitionId);
if (processDefinition == null) {
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionId);
return result;
}
// Determine if the login user is the owner of the process definition
if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
// check process definition is already online
if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE,processDefinitionId);
return result;
}
// get the timing according to the process definition
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty() && schedules.size() > 1) {
logger.warn("scheduler num is {},Greater than 1",schedules.size());
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
return result;
}else if(schedules.size() == 1){
Schedule schedule = schedules.get(0);
if(schedule.getReleaseState() == ReleaseState.OFFLINE){
scheduleMapper.deleteById(schedule.getId());
}else if(schedule.getReleaseState() == ReleaseState.ONLINE){
putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE,schedule.getId());
return result;
}
}
int delete = processDefineMapper.deleteById(processDefinitionId);
if (delete > 0) {
putMsg(result, Status.SUCCESS);
} else {
putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_ID_ERROR);
}
return result;
}
/**
* release process definition: online / offline
*
* @param loginUser login user
* @param projectName project name
* @param id process definition id
* @param releaseState release state
* @return release result code
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> releaseProcessDefinition(User loginUser, String projectName, int id, int releaseState) {
HashMap<String, Object> result = new HashMap<>();
Project project = projectMapper.queryByName(projectName);
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultEnum = (Status) checkResult.get(Constants.STATUS);
if (resultEnum != Status.SUCCESS) {
return checkResult;
}
ReleaseState state = ReleaseState.getEnum(releaseState);
// check state
if (null == state) {
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
return result;
}
ProcessDefinition processDefinition = processDefineMapper.selectById(id);
switch (state) {
case ONLINE:
// To check resources whether they are already cancel authorized or deleted
String resourceIds = processDefinition.getResourceIds();
if (StringUtils.isNotBlank(resourceIds)) {
Integer[] resourceIdArray = Arrays.stream(resourceIds.split(",")).map(Integer::parseInt).toArray(Integer[]::new);
PermissionCheck<Integer> permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_ID,processService,resourceIdArray,loginUser.getId(),logger);
try {
permissionCheck.checkPermission();
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, "releaseState");
return result;
}
}
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
break;
case OFFLINE:
processDefinition.setReleaseState(state);
processDefineMapper.updateById(processDefinition);
List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
new int[]{processDefinition.getId()}
);
for(Schedule schedule:scheduleList){
logger.info("set schedule offline, project id: {}, schedule id: {}, process definition id: {}", project.getId(), schedule.getId(), id);
// set status
schedule.setReleaseState(ReleaseState.OFFLINE);
scheduleMapper.updateById(schedule);
SchedulerService.deleteSchedule(project.getId(), schedule.getId());
}
break;
default:
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "releaseState");
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* batch export process definition by ids
* @param loginUser
* @param projectName
* @param processDefinitionIds
* @param response
*/
public void batchExportProcessDefinitionByIds(User loginUser, String projectName, String processDefinitionIds, HttpServletResponse response){
if(StringUtils.isEmpty(processDefinitionIds)){
return;
}
//export project info
Project project = projectMapper.queryByName(projectName);
//check user access for project
Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectName);
Status resultStatus = (Status) checkResult.get(Constants.STATUS);
if(resultStatus != Status.SUCCESS){
return;
}
List<ProcessMeta> processDefinitionList =
getProcessDefinitionList(processDefinitionIds);
if(CollectionUtils.isNotEmpty(processDefinitionList)){
downloadProcessDefinitionFile(response, processDefinitionList);
}
}
/**
* get process definition list by ids
* @param processDefinitionIds
* @return
*/
private List<ProcessMeta> getProcessDefinitionList(String processDefinitionIds){
List<ProcessMeta> processDefinitionList = new ArrayList<>();
String[] processDefinitionIdArray = processDefinitionIds.split(",");
for (String strProcessDefinitionId : processDefinitionIdArray) {
//get workflow info
int processDefinitionId = Integer.parseInt(strProcessDefinitionId);
ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(processDefinitionId);
if (null != processDefinition) {
processDefinitionList.add(exportProcessMetaData(processDefinitionId, processDefinition));
}
}
return processDefinitionList;
}
/**
* download the process definition file
* @param response
* @param processDefinitionList
*/
private void downloadProcessDefinitionFile(HttpServletResponse response, List<ProcessMeta> processDefinitionList) {
response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
BufferedOutputStream buff = null;
ServletOutputStream out = null;
try {
out = response.getOutputStream();
buff = new BufferedOutputStream(out);
buff.write(JSON.toJSONString(processDefinitionList).getBytes(StandardCharsets.UTF_8));
buff.flush();
buff.close();
} catch (IOException e) {
logger.warn("export process fail", e);
}finally {
if (null != buff) {
try {
buff.close();
} catch (Exception e) {
logger.warn("export process buffer not close", e);
}
}
if (null != out) {
try {
out.close();
} catch (Exception e) {
logger.warn("export process output stream not close", e);
}
}
}
}
/**
* get export process metadata string
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @return export process metadata string
*/
public String exportProcessMetaDataStr(Integer processDefinitionId, ProcessDefinition processDefinition) {
//create workflow json file
return JSONUtils.toJsonString(exportProcessMetaData(processDefinitionId,processDefinition));
}
/**
* get export process metadata string
* @param processDefinitionId process definition id
* @param processDefinition process definition
* @return export process metadata string
*/
public ProcessMeta exportProcessMetaData(Integer processDefinitionId, ProcessDefinition processDefinition) {
//correct task param which has data source or dependent param
String correctProcessDefinitionJson = addExportTaskNodeSpecialParam(processDefinition.getProcessDefinitionJson());
processDefinition.setProcessDefinitionJson(correctProcessDefinitionJson);
//export process metadata
ProcessMeta exportProcessMeta = new ProcessMeta();
exportProcessMeta.setProjectName(processDefinition.getProjectName());
exportProcessMeta.setProcessDefinitionName(processDefinition.getName());
exportProcessMeta.setProcessDefinitionJson(processDefinition.getProcessDefinitionJson());
exportProcessMeta.setProcessDefinitionLocations(processDefinition.getLocations());
exportProcessMeta.setProcessDefinitionConnects(processDefinition.getConnects());
//schedule info
List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionId(processDefinitionId);
if (!schedules.isEmpty()) {
Schedule schedule = schedules.get(0);
exportProcessMeta.setScheduleWarningType(schedule.getWarningType().toString());
exportProcessMeta.setScheduleWarningGroupId(schedule.getWarningGroupId());
exportProcessMeta.setScheduleStartTime(DateUtils.dateToString(schedule.getStartTime()));
exportProcessMeta.setScheduleEndTime(DateUtils.dateToString(schedule.getEndTime()));
exportProcessMeta.setScheduleCrontab(schedule.getCrontab());
exportProcessMeta.setScheduleFailureStrategy(String.valueOf(schedule.getFailureStrategy()));
exportProcessMeta.setScheduleReleaseState(String.valueOf(ReleaseState.OFFLINE));
exportProcessMeta.setScheduleProcessInstancePriority(String.valueOf(schedule.getProcessInstancePriority()));
exportProcessMeta.setScheduleWorkerGroupName(schedule.getWorkerGroup());
}
//create workflow json file
return exportProcessMeta;
}
/**
* correct task param which has datasource or dependent
* @param processDefinitionJson processDefinitionJson
* @return correct processDefinitionJson
*/
public String addExportTaskNodeSpecialParam(String processDefinitionJson) {
JSONObject jsonObject = JSONUtils.parseObject(processDefinitionJson);
JSONArray jsonArray = (JSONArray) jsonObject.get("tasks");
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
if (StringUtils.isNotEmpty(taskNode.getString("type"))) {
String taskType = taskNode.getString("type");
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
addTaskParam.addExportSpecialParam(taskNode);
}
}
}
jsonObject.put("tasks", jsonArray);
return jsonObject.toString();
}
/**
* check task if has sub process
* @param taskType task type
* @return if task has sub process return true else false
*/
private boolean checkTaskHasSubProcess(String taskType) {
return taskType.equals(TaskType.SUB_PROCESS.name());
}
/**
* import process definition
* @param loginUser login user
* @param file process metadata json file
* @param currentProjectName current project name
* @return import process
*/
@Transactional(rollbackFor = Exception.class)
public Map<String, Object> importProcessDefinition(User loginUser, MultipartFile file, String currentProjectName) {
Map<String, Object> result = new HashMap<>(5);
String processMetaJson = FileUtils.file2String(file);
List<ProcessMeta> processMetaList = JSON.parseArray(processMetaJson,ProcessMeta.class);
//check file content
if (CollectionUtils.isEmpty(processMetaList)) {
putMsg(result, Status.DATA_IS_NULL, "fileContent");
return result;
}
for(ProcessMeta processMeta:processMetaList){
if (!checkAndImportProcessDefinition(loginUser, currentProjectName, result, processMeta)){
return result;
}
}
return result;
}
/**
* check and import process definition
* @param loginUser
* @param currentProjectName
* @param result
* @param processMeta
* @return
*/
private boolean checkAndImportProcessDefinition(User loginUser, String currentProjectName, Map<String, Object> result, ProcessMeta processMeta) {
if(!checkImportanceParams(processMeta,result)){
return false;
}
//deal with process name
String processDefinitionName = processMeta.getProcessDefinitionName();
//use currentProjectName to query
Project targetProject = projectMapper.queryByName(currentProjectName);
if(null != targetProject){
processDefinitionName = recursionProcessDefinitionName(targetProject.getId(),
processDefinitionName, 1);
}
// get create process result
Map<String, Object> createProcessResult =
getCreateProcessResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
addImportTaskNodeParam(loginUser, processMeta.getProcessDefinitionJson(), targetProject));
if(createProcessResult == null){
return false;
}
//create process definition
Integer processDefinitionId =
Objects.isNull(createProcessResult.get("processDefinitionId"))?
null:Integer.parseInt(createProcessResult.get("processDefinitionId").toString());
//scheduler param
return getImportProcessScheduleResult(loginUser,
currentProjectName,
result,
processMeta,
processDefinitionName,
processDefinitionId);
}
/**
* get create process result
* @param loginUser
* @param currentProjectName
* @param result
* @param processMeta
* @param processDefinitionName
* @param importProcessParam
* @return
*/
private Map<String, Object> getCreateProcessResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
String importProcessParam){
Map<String, Object> createProcessResult = null;
try {
createProcessResult = createProcessDefinition(loginUser
,currentProjectName,
processDefinitionName+"_import_"+System.currentTimeMillis(),
importProcessParam,
processMeta.getProcessDefinitionDescription(),
processMeta.getProcessDefinitionLocations(),
processMeta.getProcessDefinitionConnects());
putMsg(result, Status.SUCCESS);
} catch (JsonProcessingException e) {
logger.error("import process meta json data: {}", e.getMessage(), e);
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
}
return createProcessResult;
}
/**
* get import process schedule result
* @param loginUser
* @param currentProjectName
* @param result
* @param processMeta
* @param processDefinitionName
* @param processDefinitionId
* @return
*/
private boolean getImportProcessScheduleResult(User loginUser,
String currentProjectName,
Map<String, Object> result,
ProcessMeta processMeta,
String processDefinitionName,
Integer processDefinitionId) {
if (null != processMeta.getScheduleCrontab() && null != processDefinitionId) {
int scheduleInsert = importProcessSchedule(loginUser,
currentProjectName,
processMeta,
processDefinitionName,
processDefinitionId);
if (0 == scheduleInsert) {
putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
return false;
}
}
return true;
}
/**
* check importance params
* @param processMeta
* @param result
* @return
*/
private boolean checkImportanceParams(ProcessMeta processMeta,Map<String, Object> result){
if (StringUtils.isEmpty(processMeta.getProjectName())) {
putMsg(result, Status.DATA_IS_NULL, "projectName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionName())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionName");
return false;
}
if (StringUtils.isEmpty(processMeta.getProcessDefinitionJson())) {
putMsg(result, Status.DATA_IS_NULL, "processDefinitionJson");
return false;
}
return true;
}
/**
* import process add special task param
* @param loginUser login user
* @param processDefinitionJson process definition json
* @param targetProject target project
* @return import process param
*/
private String addImportTaskNodeParam(User loginUser, String processDefinitionJson, Project targetProject) {
JSONObject jsonObject = JSONUtils.parseObject(processDefinitionJson);
JSONArray jsonArray = (JSONArray) jsonObject.get("tasks");
//add sql and dependent param
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
String taskType = taskNode.getString("type");
ProcessAddTaskParam addTaskParam = TaskNodeParamFactory.getByTaskType(taskType);
if (null != addTaskParam) {
addTaskParam.addImportSpecialParam(taskNode);
}
}
//recursively correct sub-process parameters: the map key is the old process definition id, the value is the new one
Map<Integer, Integer> subProcessIdMap = new HashMap<>(20);
List<Object> subProcessList = jsonArray.stream()
.filter(elem -> checkTaskHasSubProcess(JSONUtils.parseObject(elem.toString()).getString("type")))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, jsonArray, subProcessIdMap);
}
jsonObject.put("tasks", jsonArray);
return jsonObject.toString();
}
/**
* import process schedule
* @param loginUser login user
* @param currentProjectName current project name
* @param processMeta process meta data
* @param processDefinitionName process definition name
* @param processDefinitionId process definition id
* @return insert schedule flag
*/
public int importProcessSchedule(User loginUser, String currentProjectName, ProcessMeta processMeta,
String processDefinitionName, Integer processDefinitionId) {
Date now = new Date();
Schedule scheduleObj = new Schedule();
scheduleObj.setProjectName(currentProjectName);
scheduleObj.setProcessDefinitionId(processDefinitionId);
scheduleObj.setProcessDefinitionName(processDefinitionName);
scheduleObj.setCreateTime(now);
scheduleObj.setUpdateTime(now);
scheduleObj.setUserId(loginUser.getId());
scheduleObj.setUserName(loginUser.getUserName());
scheduleObj.setCrontab(processMeta.getScheduleCrontab());
if (null != processMeta.getScheduleStartTime()) {
scheduleObj.setStartTime(DateUtils.stringToDate(processMeta.getScheduleStartTime()));
}
if (null != processMeta.getScheduleEndTime()) {
scheduleObj.setEndTime(DateUtils.stringToDate(processMeta.getScheduleEndTime()));
}
if (null != processMeta.getScheduleWarningType()) {
scheduleObj.setWarningType(WarningType.valueOf(processMeta.getScheduleWarningType()));
}
if (null != processMeta.getScheduleWarningGroupId()) {
scheduleObj.setWarningGroupId(processMeta.getScheduleWarningGroupId());
}
if (null != processMeta.getScheduleFailureStrategy()) {
scheduleObj.setFailureStrategy(FailureStrategy.valueOf(processMeta.getScheduleFailureStrategy()));
}
if (null != processMeta.getScheduleReleaseState()) {
scheduleObj.setReleaseState(ReleaseState.valueOf(processMeta.getScheduleReleaseState()));
}
if (null != processMeta.getScheduleProcessInstancePriority()) {
scheduleObj.setProcessInstancePriority(Priority.valueOf(processMeta.getScheduleProcessInstancePriority()));
}
if (null != processMeta.getScheduleWorkerGroupName()) {
scheduleObj.setWorkerGroup(processMeta.getScheduleWorkerGroupName());
}
return scheduleMapper.insert(scheduleObj);
}
/**
* check import process has sub process
* recursion create sub process
* @param loginUser login user
* @param targetProject target project
* @param jsonArray process task array
* @param subProcessIdMap correct sub process id map
*/
public void importSubProcess(User loginUser, Project targetProject, JSONArray jsonArray, Map<Integer, Integer> subProcessIdMap) {
for (int i = 0; i < jsonArray.size(); i++) {
JSONObject taskNode = jsonArray.getJSONObject(i);
String taskType = taskNode.getString("type");
if (checkTaskHasSubProcess(taskType)) {
//get sub process info
JSONObject subParams = JSONUtils.parseObject(taskNode.getString("params"));
Integer subProcessId = subParams.getInteger("processDefinitionId");
ProcessDefinition subProcess = processDefineMapper.queryByDefineId(subProcessId);
//check is sub process exist in db
if (null != subProcess) {
String subProcessJson = subProcess.getProcessDefinitionJson();
//check current project has sub process
ProcessDefinition currentProjectSubProcess = processDefineMapper.queryByDefineName(targetProject.getId(), subProcess.getName());
if (null == currentProjectSubProcess) {
JSONArray subJsonArray = (JSONArray) JSONUtils.parseObject(subProcess.getProcessDefinitionJson()).get("tasks");
List<Object> subProcessList = subJsonArray.stream()
.filter(item -> checkTaskHasSubProcess(JSONUtils.parseObject(item.toString()).getString("type")))
.collect(Collectors.toList());
if (CollectionUtils.isNotEmpty(subProcessList)) {
importSubProcess(loginUser, targetProject, subJsonArray, subProcessIdMap);
//sub process processId correct
if (!subProcessIdMap.isEmpty()) {
for (Map.Entry<Integer, Integer> entry : subProcessIdMap.entrySet()) {
String oldSubProcessId = "\"processDefinitionId\":" + entry.getKey();
String newSubProcessId = "\"processDefinitionId\":" + entry.getValue();
subProcessJson = subProcessJson.replaceAll(oldSubProcessId, newSubProcessId);
}
subProcessIdMap.clear();
}
}
//if sub-process recursion
Date now = new Date();
//create sub process in target project
ProcessDefinition processDefine = new ProcessDefinition();
processDefine.setName(subProcess.getName());
processDefine.setVersion(subProcess.getVersion());
processDefine.setReleaseState(subProcess.getReleaseState());
processDefine.setProjectId(targetProject.getId());
processDefine.setUserId(loginUser.getId());
processDefine.setProcessDefinitionJson(subProcessJson);
processDefine.setDescription(subProcess.getDescription());
processDefine.setLocations(subProcess.getLocations());
processDefine.setConnects(subProcess.getConnects());
processDefine.setTimeout(subProcess.getTimeout());
processDefine.setTenantId(subProcess.getTenantId());
processDefine.setGlobalParams(subProcess.getGlobalParams());
processDefine.setCreateTime(now);
processDefine.setUpdateTime(now);
processDefine.setFlag(subProcess.getFlag());
processDefine.setReceivers(subProcess.getReceivers());
processDefine.setReceiversCc(subProcess.getReceiversCc());
processDefineMapper.insert(processDefine);
logger.info("create sub process, project: {}, process name: {}", targetProject.getName(), processDefine.getName());
//modify task node
ProcessDefinition newSubProcessDefine = processDefineMapper.queryByDefineName(processDefine.getProjectId(),processDefine.getName());
if (null != newSubProcessDefine) {
subProcessIdMap.put(subProcessId, newSubProcessDefine.getId());
subParams.put("processDefinitionId", newSubProcessDefine.getId());
taskNode.put("params", subParams);
}
}
}
}
}
}
/**
* check the process definition node meets the specifications
*
* @param processData process data
* @param processDefinitionJson process definition json
* @return check result code
*/
public Map<String, Object> checkProcessNodeList(ProcessData processData, String processDefinitionJson) {
Map<String, Object> result = new HashMap<>(5);
try {
if (processData == null) {
logger.error("process data is null");
putMsg(result,Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
// Check whether the task node is normal
List<TaskNode> taskNodes = processData.getTasks();
if (taskNodes == null) {
logger.error("process node info is empty");
putMsg(result, Status.DATA_IS_NULL, processDefinitionJson);
return result;
}
// check has cycle
if (graphHasCycle(taskNodes)) {
logger.error("process DAG has cycle");
putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
return result;
}
// check whether the process definition json is normal
for (TaskNode taskNode : taskNodes) {
if (!CheckUtils.checkTaskNodeParameters(taskNode.getParams(), taskNode.getType())) {
logger.error("task node {} parameter invalid", taskNode.getName());
putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
return result;
}
// check extra params
CheckUtils.checkOtherParams(taskNode.getExtras());
}
putMsg(result,Status.SUCCESS);
} catch (Exception e) {
result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
result.put(Constants.MSG, e.getMessage());
}
return result;
}
/**
* get task node details based on process definition
*
* @param defineId define id
* @return task node list
* @throws Exception exception
*/
public Map<String, Object> getTaskNodeListByDefinitionId(Integer defineId) throws Exception {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineId);
return result;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
putMsg(result,Status.DATA_IS_NOT_VALID, processDefinitionJson);
return result;
}
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
result.put(Constants.DATA_LIST, taskNodeList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* get task node details based on process definition
*
* @param defineIdList define id list
* @return task node list
* @throws Exception exception
*/
public Map<String, Object> getTaskNodeListByDefinitionIdList(String defineIdList) throws Exception {
Map<String, Object> result = new HashMap<>();
Map<Integer, List<TaskNode>> taskNodeMap = new HashMap<>();
String[] idList = defineIdList.split(",");
List<Integer> idIntList = new ArrayList<>();
for(String definitionId : idList) {
idIntList.add(Integer.parseInt(definitionId));
}
Integer[] idArray = idIntList.toArray(new Integer[idIntList.size()]);
List<ProcessDefinition> processDefinitionList = processDefineMapper.queryDefinitionListByIdList(idArray);
if (CollectionUtils.isEmpty(processDefinitionList)) {
logger.info("process definition not exists");
putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, defineIdList);
return result;
}
for(ProcessDefinition processDefinition : processDefinitionList){
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = (processData.getTasks() == null) ? new ArrayList<>() : processData.getTasks();
taskNodeMap.put(processDefinition.getId(), taskNodeList);
}
result.put(Constants.DATA_LIST, taskNodeMap);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* query process definition all by project id
*
* @param projectId project id
* @return process definitions in the project
*/
public Map<String, Object> queryProcessDefinitionAllByProjectId(Integer projectId) {
HashMap<String, Object> result = new HashMap<>(5);
List<ProcessDefinition> resourceList = processDefineMapper.queryAllDefinitionList(projectId);
result.put(Constants.DATA_LIST, resourceList);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* Encapsulates the TreeView structure
*
* @param processId process definition id
* @param limit limit
* @return tree view json data
* @throws Exception exception
*/
public Map<String, Object> viewTree(Integer processId, Integer limit) throws Exception {
Map<String, Object> result = new HashMap<>();
ProcessDefinition processDefinition = processDefineMapper.selectById(processId);
if (null == processDefinition) {
logger.info("process define not exists");
putMsg(result,Status.PROCESS_DEFINE_NOT_EXIST, processDefinition);
return result;
}
DAG<String, TaskNode, TaskNodeRelation> dag = genDagGraph(processDefinition);
/**
* nodes that are running
*/
Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
/**
* nodes that are waiting to run
*/
Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
/**
* List of process instances
*/
List<ProcessInstance> processInstanceList = processInstanceMapper.queryByProcessDefineId(processId, limit);
for(ProcessInstance processInstance:processInstanceList){
processInstance.setDuration(DateUtils.differSec(processInstance.getStartTime(),processInstance.getEndTime()));
}
if (limit > processInstanceList.size()) {
limit = processInstanceList.size();
}
TreeViewDto parentTreeViewDto = new TreeViewDto();
parentTreeViewDto.setName("DAG");
parentTreeViewDto.setType("");
// Specify the process definition, because it is a TreeView for a process definition
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), "", processInstance.getState().toString()
, processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
}
List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
parentTreeViewDtoList.add(parentTreeViewDto);
// Here is the encapsulation task instance
for (String startNode : dag.getBeginNode()) {
runningNodeMap.put(startNode, parentTreeViewDtoList);
}
while (Stopper.isRunning()) {
Set<String> postNodeList = null;
Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<String, List<TreeViewDto>> en = iter.next();
String nodeName = en.getKey();
parentTreeViewDtoList = en.getValue();
TreeViewDto treeViewDto = new TreeViewDto();
treeViewDto.setName(nodeName);
TaskNode taskNode = dag.getNode(nodeName);
treeViewDto.setType(taskNode.getType());
//set treeViewDto instances
for (int i = limit - 1; i >= 0; i--) {
ProcessInstance processInstance = processInstanceList.get(i);
TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName);
if (taskInstance == null) {
treeViewDto.getInstances().add(new Instance(-1, "not running", "null"));
} else {
Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
int subProcessId = 0;
/**
* if process is sub process, the return sub id, or sub id=0
*/
if (taskInstance.getTaskType().equals(TaskType.SUB_PROCESS.name())) {
String taskJson = taskInstance.getTaskJson();
taskNode = JSON.parseObject(taskJson, TaskNode.class);
subProcessId = Integer.parseInt(JSON.parseObject(
taskNode.getParams()).getString(CMDPARAM_SUB_PROCESS_DEFINE_ID));
}
treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString()
, taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
}
}
for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
pTreeViewDto.getChildren().add(treeViewDto);
}
postNodeList = dag.getSubsequentNodes(nodeName);
if (CollectionUtils.isNotEmpty(postNodeList)) {
for (String nextNodeName : postNodeList) {
List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName);
if (CollectionUtils.isNotEmpty(treeViewDtoList)) {
treeViewDtoList.add(treeViewDto);
waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
} else {
treeViewDtoList = new ArrayList<>();
treeViewDtoList.add(treeViewDto);
waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
}
}
}
runningNodeMap.remove(nodeName);
}
if (waitingRunningNodeMap == null || waitingRunningNodeMap.size() == 0) {
break;
} else {
runningNodeMap.putAll(waitingRunningNodeMap);
waitingRunningNodeMap.clear();
}
}
result.put(Constants.DATA_LIST, parentTreeViewDto);
result.put(Constants.STATUS, Status.SUCCESS);
result.put(Constants.MSG, Status.SUCCESS.getMsg());
return result;
}
/**
* Generate the DAG Graph based on the process definition id
*
* @param processDefinition process definition
* @return dag graph
* @throws Exception if exception happens
*/
private DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) throws Exception {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//check process data
if (null != processData) {
List<TaskNode> taskNodeList = processData.getTasks();
processDefinition.setGlobalParamList(processData.getGlobalParams());
ProcessDag processDag = DagHelper.getProcessDag(taskNodeList);
// Generate concrete Dag to be executed
return DagHelper.buildDagGraph(processDag);
}
return new DAG<>();
}
/**
* whether the graph has a ring
*
* @param taskNodeResponseList task node response list
* @return if graph has cycle flag
*/
private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
DAG<String, TaskNode, String> graph = new DAG<>();
// Fill the vertices
for (TaskNode taskNodeResponse : taskNodeResponseList) {
graph.addNode(taskNodeResponse.getName(), taskNodeResponse);
}
// Fill edge relations
for (TaskNode taskNodeResponse : taskNodeResponseList) {
taskNodeResponse.getPreTasks();
List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(),String.class);
if (CollectionUtils.isNotEmpty(preTasks)) {
for (String preTask : preTasks) {
if (!graph.addEdge(preTask, taskNodeResponse.getName())) {
return true;
}
}
}
}
return graph.hasCycle();
}
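/**
 * recursively append a "(num)" suffix until the candidate name no longer clashes with an
 * existing process definition in the project (used when importing definitions)
 *
 * @param projectId project id
 * @param processDefinitionName candidate process definition name
 * @param num current suffix number
 * @return a process definition name that is unique within the project
 */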
private String recursionProcessDefinitionName(Integer projectId,String processDefinitionName,int num){
ProcessDefinition processDefinition = processDefineMapper.queryByDefineName(projectId, processDefinitionName);
if (processDefinition != null) {
if(num > 1){
String str = processDefinitionName.substring(0,processDefinitionName.length() - 3);
processDefinitionName = str + "("+num+")";
}else{
processDefinitionName = processDefinition.getName() + "("+num+")";
}
}else{
return processDefinitionName;
}
return recursionProcessDefinitionName(projectId,processDefinitionName,num + 1);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,559 | [Feature]unique processdefinition name in the same project | **branch**
dev
**Is your feature request related to a problem? Please describe.**
If several process definitions in one project share the same name, it is hard to tell which is which, and duplicate names are usually created by mistake.
**Describe the solution you'd like**
Enforce a unique process definition name within one project.
**Additional context**
name suffix is also good | https://github.com/apache/dolphinscheduler/issues/2559 | https://github.com/apache/dolphinscheduler/pull/2617 | 33658d57fd9149cc8321e53683eeb756076fe7d7 | 6127f3a4609cda2937f47432e5c2c2ddb75c4452 | "2020-04-28T10:34:05Z" | java | "2020-05-07T09:18:42Z" | sql/dolphinscheduler_mysql.sql | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
SET FOREIGN_KEY_CHECKS=0;
-- ----------------------------
-- Table structure for QRTZ_BLOB_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`;
CREATE TABLE `QRTZ_BLOB_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`BLOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_BLOB_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CALENDARS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CALENDARS`;
CREATE TABLE `QRTZ_CALENDARS` (
`SCHED_NAME` varchar(120) NOT NULL,
`CALENDAR_NAME` varchar(200) NOT NULL,
`CALENDAR` blob NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_CALENDARS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_CRON_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`;
CREATE TABLE `QRTZ_CRON_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`CRON_EXPRESSION` varchar(120) NOT NULL,
`TIME_ZONE_ID` varchar(80) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_CRON_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_FIRED_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`;
CREATE TABLE `QRTZ_FIRED_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`ENTRY_ID` varchar(95) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`FIRED_TIME` bigint(13) NOT NULL,
`SCHED_TIME` bigint(13) NOT NULL,
`PRIORITY` int(11) NOT NULL,
`STATE` varchar(16) NOT NULL,
`JOB_NAME` varchar(200) DEFAULT NULL,
`JOB_GROUP` varchar(200) DEFAULT NULL,
`IS_NONCONCURRENT` varchar(1) DEFAULT NULL,
`REQUESTS_RECOVERY` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`),
KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`),
KEY `IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_FIRED_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_JOB_DETAILS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`;
CREATE TABLE `QRTZ_JOB_DETAILS` (
`SCHED_NAME` varchar(120) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`JOB_CLASS_NAME` varchar(250) NOT NULL,
`IS_DURABLE` varchar(1) NOT NULL,
`IS_NONCONCURRENT` varchar(1) NOT NULL,
`IS_UPDATE_DATA` varchar(1) NOT NULL,
`REQUESTS_RECOVERY` varchar(1) NOT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`),
KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_JOB_DETAILS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_LOCKS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_LOCKS`;
CREATE TABLE `QRTZ_LOCKS` (
`SCHED_NAME` varchar(120) NOT NULL,
`LOCK_NAME` varchar(40) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_LOCKS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`;
CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_PAUSED_TRIGGER_GRPS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SCHEDULER_STATE
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`;
CREATE TABLE `QRTZ_SCHEDULER_STATE` (
`SCHED_NAME` varchar(120) NOT NULL,
`INSTANCE_NAME` varchar(200) NOT NULL,
`LAST_CHECKIN_TIME` bigint(13) NOT NULL,
`CHECKIN_INTERVAL` bigint(13) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_SCHEDULER_STATE
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`REPEAT_COUNT` bigint(7) NOT NULL,
`REPEAT_INTERVAL` bigint(12) NOT NULL,
`TIMES_TRIGGERED` bigint(10) NOT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_SIMPLE_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`;
CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`STR_PROP_1` varchar(512) DEFAULT NULL,
`STR_PROP_2` varchar(512) DEFAULT NULL,
`STR_PROP_3` varchar(512) DEFAULT NULL,
`INT_PROP_1` int(11) DEFAULT NULL,
`INT_PROP_2` int(11) DEFAULT NULL,
`LONG_PROP_1` bigint(20) DEFAULT NULL,
`LONG_PROP_2` bigint(20) DEFAULT NULL,
`DEC_PROP_1` decimal(13,4) DEFAULT NULL,
`DEC_PROP_2` decimal(13,4) DEFAULT NULL,
`BOOL_PROP_1` varchar(1) DEFAULT NULL,
`BOOL_PROP_2` varchar(1) DEFAULT NULL,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_SIMPROP_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for QRTZ_TRIGGERS
-- ----------------------------
DROP TABLE IF EXISTS `QRTZ_TRIGGERS`;
CREATE TABLE `QRTZ_TRIGGERS` (
`SCHED_NAME` varchar(120) NOT NULL,
`TRIGGER_NAME` varchar(200) NOT NULL,
`TRIGGER_GROUP` varchar(200) NOT NULL,
`JOB_NAME` varchar(200) NOT NULL,
`JOB_GROUP` varchar(200) NOT NULL,
`DESCRIPTION` varchar(250) DEFAULT NULL,
`NEXT_FIRE_TIME` bigint(13) DEFAULT NULL,
`PREV_FIRE_TIME` bigint(13) DEFAULT NULL,
`PRIORITY` int(11) DEFAULT NULL,
`TRIGGER_STATE` varchar(16) NOT NULL,
`TRIGGER_TYPE` varchar(8) NOT NULL,
`START_TIME` bigint(13) NOT NULL,
`END_TIME` bigint(13) DEFAULT NULL,
`CALENDAR_NAME` varchar(200) DEFAULT NULL,
`MISFIRE_INSTR` smallint(2) DEFAULT NULL,
`JOB_DATA` blob,
PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`),
KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`),
KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`),
KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`),
KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`),
CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
-- ----------------------------
-- Records of QRTZ_TRIGGERS
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_access_token
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_access_token`;
CREATE TABLE `t_ds_access_token` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`token` varchar(64) DEFAULT NULL COMMENT 'token',
`expire_time` datetime DEFAULT NULL COMMENT 'end time of token ',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_access_token
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alert
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alert`;
CREATE TABLE `t_ds_alert` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`title` varchar(64) DEFAULT NULL COMMENT 'title',
`show_type` tinyint(4) DEFAULT NULL COMMENT 'send email type,0:TABLE,1:TEXT',
`content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)',
`alert_type` tinyint(4) DEFAULT NULL COMMENT '0:email,1:sms',
`alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed',
`log` text COMMENT 'log',
`alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`receivers` text COMMENT 'receivers',
`receivers_cc` text COMMENT 'cc',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alert
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_alertgroup`;
CREATE TABLE `t_ds_alertgroup` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`group_name` varchar(255) DEFAULT NULL COMMENT 'group name',
`group_type` tinyint(4) DEFAULT NULL COMMENT 'Group type (message 0, SMS 1...)',
`description` varchar(255) DEFAULT NULL,
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_command`;
CREATE TABLE `t_ds_command` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`dependence` varchar(255) DEFAULT NULL COMMENT 'dependence',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_datasource
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_datasource`;
CREATE TABLE `t_ds_datasource` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(64) NOT NULL COMMENT 'data source name',
`note` varchar(256) DEFAULT NULL COMMENT 'description',
`type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark',
`user_id` int(11) NOT NULL COMMENT 'the creator id',
`connection_params` text NOT NULL COMMENT 'json connection params',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_datasource
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_error_command
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_error_command`;
CREATE TABLE `t_ds_error_command` (
`id` int(11) NOT NULL COMMENT 'key',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`executor_id` int(11) DEFAULT NULL COMMENT 'executor id',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time',
`start_time` datetime DEFAULT NULL COMMENT 'start time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`dependence` text COMMENT 'dependence',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) COMMENT 'worker group',
`message` text COMMENT 'message',
PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;
-- ----------------------------
-- Records of t_ds_error_command
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_definition
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_definition`;
CREATE TABLE `t_ds_process_definition` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'process definition name',
`version` int(11) DEFAULT NULL COMMENT 'process definition version',
`release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online',
`project_id` int(11) DEFAULT NULL COMMENT 'project id',
`user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id',
`process_definition_json` longtext COMMENT 'process definition json content',
`description` text,
`global_params` text COMMENT 'global parameters',
`flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`receivers` text COMMENT 'receivers',
`receivers_cc` text COMMENT 'cc',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`modify_by` varchar(255) DEFAULT NULL,
`resource_ids` varchar(255) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `process_definition_index` (`project_id`,`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_definition
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_process_instance`;
CREATE TABLE `t_ds_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'process instance name',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance',
`start_time` datetime DEFAULT NULL COMMENT 'process instance start time',
`end_time` datetime DEFAULT NULL COMMENT 'process instance end time',
`run_times` int(11) DEFAULT NULL COMMENT 'process instance run times',
`host` varchar(45) DEFAULT NULL COMMENT 'process instance host',
`command_type` tinyint(4) DEFAULT NULL COMMENT 'command type',
`command_param` text COMMENT 'json command parameters',
`task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes',
`max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times',
`failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed',
`warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id',
`schedule_time` datetime DEFAULT NULL COMMENT 'schedule time',
`command_start_time` datetime DEFAULT NULL COMMENT 'command start time',
`global_params` text COMMENT 'global parameters',
  `process_instance_json` longtext COMMENT 'process instance json (a copy of the process definition json)',
`flag` tinyint(4) DEFAULT '1' COMMENT 'flag',
`update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process',
`executor_id` int(11) NOT NULL COMMENT 'executor id',
`locations` text COMMENT 'Node location information',
`connects` text COMMENT 'Node connection information',
`history_cmd` text COMMENT 'history commands of process instance operation',
`dependence_schedule_times` text COMMENT 'depend schedule fire time',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
`timeout` int(11) DEFAULT '0' COMMENT 'time out',
`tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id',
PRIMARY KEY (`id`),
KEY `process_instance_index` (`process_definition_id`,`id`) USING BTREE,
KEY `start_time_index` (`start_time`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_project
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_project`;
CREATE TABLE `t_ds_project` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(100) DEFAULT NULL COMMENT 'project name',
`description` varchar(200) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'creator id',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
`update_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_project
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_queue
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_queue`;
CREATE TABLE `t_ds_queue` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name',
`queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_queue
-- ----------------------------
INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null);
-- ----------------------------
-- Table structure for t_ds_relation_datasource_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_datasource_user`;
CREATE TABLE `t_ds_relation_datasource_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`datasource_id` int(11) DEFAULT NULL COMMENT 'data source id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_datasource_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_process_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_process_instance`;
CREATE TABLE `t_ds_relation_process_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
`parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_process_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_project_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_project_user`;
CREATE TABLE `t_ds_relation_project_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`project_id` int(11) DEFAULT NULL COMMENT 'project id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`),
KEY `user_id_index` (`user_id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_project_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_resources_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_resources_user`;
CREATE TABLE `t_ds_relation_resources_user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`user_id` int(11) NOT NULL COMMENT 'user id',
`resources_id` int(11) DEFAULT NULL COMMENT 'resource id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_resources_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_udfs_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_udfs_user`;
CREATE TABLE `t_ds_relation_udfs_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'userid',
`udf_id` int(11) DEFAULT NULL COMMENT 'udf id',
`perm` int(11) DEFAULT '1' COMMENT 'limits of authority',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_udfs_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_relation_user_alertgroup
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_relation_user_alertgroup`;
CREATE TABLE `t_ds_relation_user_alertgroup` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_relation_user_alertgroup
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_resources
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_resources`;
CREATE TABLE `t_ds_resources` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`alias` varchar(64) DEFAULT NULL COMMENT 'alias',
`file_name` varchar(64) DEFAULT NULL COMMENT 'file name',
`description` varchar(256) DEFAULT NULL,
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF',
`size` bigint(20) DEFAULT NULL COMMENT 'resource size',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`pid` int(11) DEFAULT NULL,
`full_name` varchar(64) DEFAULT NULL,
`is_directory` tinyint(4) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_resources
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_schedules
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_schedules`;
CREATE TABLE `t_ds_schedules` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`process_definition_id` int(11) NOT NULL COMMENT 'process definition id',
`start_time` datetime NOT NULL COMMENT 'start time',
`end_time` datetime NOT NULL COMMENT 'end time',
`crontab` varchar(256) NOT NULL COMMENT 'crontab description',
`failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue',
`user_id` int(11) NOT NULL COMMENT 'user id',
`release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ',
`warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent',
`warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id',
`process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(256) DEFAULT '' COMMENT 'worker group id',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_schedules
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_session
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_session`;
CREATE TABLE `t_ds_session` (
`id` varchar(64) NOT NULL COMMENT 'key',
`user_id` int(11) DEFAULT NULL COMMENT 'user id',
`ip` varchar(45) DEFAULT NULL COMMENT 'ip',
`last_login_time` datetime DEFAULT NULL COMMENT 'last login time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_session
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_task_instance
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_task_instance`;
CREATE TABLE `t_ds_task_instance` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`name` varchar(255) DEFAULT NULL COMMENT 'task name',
`task_type` varchar(64) DEFAULT NULL COMMENT 'task type',
`process_definition_id` int(11) DEFAULT NULL COMMENT 'process definition id',
`process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id',
`task_json` longtext COMMENT 'task content json',
`state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete',
`submit_time` datetime DEFAULT NULL COMMENT 'task submit time',
`start_time` datetime DEFAULT NULL COMMENT 'task start time',
`end_time` datetime DEFAULT NULL COMMENT 'task end time',
`host` varchar(45) DEFAULT NULL COMMENT 'host of task running on',
`execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host',
`log_path` varchar(200) DEFAULT NULL COMMENT 'task log path',
`alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert',
`retry_times` int(4) DEFAULT '0' COMMENT 'task retry times',
`pid` int(4) DEFAULT NULL COMMENT 'pid of task',
`app_link` varchar(255) DEFAULT NULL COMMENT 'yarn app id',
`flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available',
`retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ',
`max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times',
`task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest',
`worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id',
`executor_id` int(11) DEFAULT NULL,
PRIMARY KEY (`id`),
KEY `process_instance_id` (`process_instance_id`) USING BTREE,
KEY `task_instance_index` (`process_definition_id`,`process_instance_id`) USING BTREE,
CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_task_instance
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_tenant
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_tenant`;
CREATE TABLE `t_ds_tenant` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code',
`tenant_name` varchar(64) DEFAULT NULL COMMENT 'tenant name',
`description` varchar(256) DEFAULT NULL,
`queue_id` int(11) DEFAULT NULL COMMENT 'queue id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_tenant
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_udfs
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_udfs`;
CREATE TABLE `t_ds_udfs` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
`user_id` int(11) NOT NULL COMMENT 'user id',
`func_name` varchar(100) NOT NULL COMMENT 'UDF function name',
`class_name` varchar(255) NOT NULL COMMENT 'class of udf',
`type` tinyint(4) NOT NULL COMMENT 'Udf function type',
`arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types',
`database` varchar(255) DEFAULT NULL COMMENT 'data base',
`description` varchar(255) DEFAULT NULL,
`resource_id` int(11) NOT NULL COMMENT 'resource id',
`resource_name` varchar(255) NOT NULL COMMENT 'resource name',
`create_time` datetime NOT NULL COMMENT 'create time',
`update_time` datetime NOT NULL COMMENT 'update time',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_udfs
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_user
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_user`;
CREATE TABLE `t_ds_user` (
`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id',
`user_name` varchar(64) DEFAULT NULL COMMENT 'user name',
`user_password` varchar(64) DEFAULT NULL COMMENT 'user password',
`user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user',
`email` varchar(64) DEFAULT NULL COMMENT 'email',
`phone` varchar(11) DEFAULT NULL COMMENT 'phone',
`tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id',
`create_time` datetime DEFAULT NULL COMMENT 'create time',
`update_time` datetime DEFAULT NULL COMMENT 'update time',
`queue` varchar(64) DEFAULT NULL COMMENT 'queue',
PRIMARY KEY (`id`),
UNIQUE KEY `user_name_unique` (`user_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
-- ----------------------------
-- Table structure for t_ds_version
-- ----------------------------
DROP TABLE IF EXISTS `t_ds_version`;
CREATE TABLE `t_ds_version` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`version` varchar(200) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `version_UNIQUE` (`version`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version';
-- ----------------------------
-- Records of t_ds_version
-- ----------------------------
INSERT INTO `t_ds_version` VALUES ('1', '2.0.0');
-- ----------------------------
-- Records of t_ds_alertgroup
-- ----------------------------
INSERT INTO `t_ds_alertgroup` VALUES ('1', 'default admin warning group', '0', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39');
-- ----------------------------
-- Records of t_ds_relation_user_alertgroup
-- ----------------------------
INSERT INTO `t_ds_relation_user_alertgroup` VALUES ('1', '1', '1', '2018-11-29 10:22:33', '2018-11-29 10:22:33');
-- ----------------------------
-- Records of t_ds_user
-- ----------------------------
INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', '[email protected]', 'xx', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null);
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,603 | [BUG] code annotation error | incubator-dolphinscheduler-dev/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java
Source code:
/**
* master server startup
*
* master server not use web service
* @param args arguments
*/
public static void main(String[] args) {
Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER);
new SpringApplicationBuilder(WorkerServer.class).web(WebApplicationType.NONE).run(args);
}
The word "master" in the Javadoc comment on lines 74 and 76 should be changed to "worker".
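For reference, the corrected Javadoc (matching the fixed file attached below) reads:
/**
 * worker server startup
 *
 * worker server not use web service
 * @param args arguments
 */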
| https://github.com/apache/dolphinscheduler/issues/2603 | https://github.com/apache/dolphinscheduler/pull/2614 | 8e080ef6085e285480e906ee8dee301e3790bc02 | 8c8e128d18b2a1e189b05554a8c011749c1408b7 | "2020-05-06T07:20:42Z" | java | "2020-05-08T07:23:31Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.worker;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.remote.NettyRemotingServer;
import org.apache.dolphinscheduler.remote.command.CommandType;
import org.apache.dolphinscheduler.remote.config.NettyServerConfig;
import org.apache.dolphinscheduler.server.worker.config.WorkerConfig;
import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor;
import org.apache.dolphinscheduler.server.worker.processor.TaskKillProcessor;
import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistry;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.WebApplicationType;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.ComponentScan;
import javax.annotation.PostConstruct;
/**
* worker server
*/
@ComponentScan("org.apache.dolphinscheduler")
public class WorkerServer {
/**
* logger
*/
private static final Logger logger = LoggerFactory.getLogger(WorkerServer.class);
/**
* netty remote server
*/
private NettyRemotingServer nettyRemotingServer;
/**
* worker registry
*/
@Autowired
private WorkerRegistry workerRegistry;
/**
* worker config
*/
@Autowired
private WorkerConfig workerConfig;
/**
* spring application context
* only use it for initialization
*/
@Autowired
private SpringApplicationContext springApplicationContext;
/**
* worker server startup
*
* worker server not use web service
* @param args arguments
*/
public static void main(String[] args) {
Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER);
new SpringApplicationBuilder(WorkerServer.class).web(WebApplicationType.NONE).run(args);
}
/**
* worker server run
*/
@PostConstruct
public void run(){
logger.info("start worker server...");
//init remoting server
NettyServerConfig serverConfig = new NettyServerConfig();
serverConfig.setListenPort(workerConfig.getListenPort());
this.nettyRemotingServer = new NettyRemotingServer(serverConfig);
this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, new TaskExecuteProcessor());
this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_REQUEST, new TaskKillProcessor());
this.nettyRemotingServer.start();
// worker registry
this.workerRegistry.registry();
/**
* register hooks, which are called before the process exits
*/
Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
@Override
public void run() {
close("shutdownHook");
}
}));
}
public void close(String cause) {
try {
//execute only once
if(Stopper.isStopped()){
return;
}
logger.info("worker server is stopping ..., cause : {}", cause);
            // set the stop signal to true
Stopper.stop();
try {
                //sleep 3 seconds so that running threads can stop quietly
Thread.sleep(3000L);
}catch (Exception e){
logger.warn("thread sleep exception", e);
}
this.nettyRemotingServer.close();
this.workerRegistry.unRegistry();
} catch (Exception e) {
logger.error("worker server stop exception ", e);
System.exit(-1);
}
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,581 | [BUG] When the process involves sub-processes, the complement operation will only take the start time, and will repeat the number of days to perform the complement | *For better global communication, please give priority to using English description, thx! *
**Describe the bug**
When the process contains sub-processes, a complement (backfill) run only takes the start date: the workflow is executed once per day in the chosen range, but every sub-process execution is given the start date.
**To Reproduce**
Steps to reproduce the behavior, for example:
1. Create a simple sub-process, such as a shell
2. Create another main process and associate sub-processes
3. Run a complement (backfill) operation; a range of just two days is enough to reproduce the problem
The specific operation is as follows:
1. Create subprocess,
![image](https://user-images.githubusercontent.com/10054719/80662237-5e165880-8ac3-11ea-9e47-ea97af1179ab.png)
2. Save the sub-process and set its variables
![image](https://user-images.githubusercontent.com/10054719/80662384-b6e5f100-8ac3-11ea-84f5-a307acc79f88.png)
3. Create the main process and add a sub-process node to it
![image](https://user-images.githubusercontent.com/10054719/80662453-d715b000-8ac3-11ea-8186-b7a92a709c79.png)
![image](https://user-images.githubusercontent.com/10054719/80662492-f0b6f780-8ac3-11ea-9c0c-ed628b82772c.png)
4. Save the main process and set its variables
![image](https://user-images.githubusercontent.com/10054719/80662523-07f5e500-8ac4-11ea-8b15-8df097cc5c76.png)
5. The result looks as follows.
![image](https://user-images.githubusercontent.com/10054719/80662545-193ef180-8ac4-11ea-8343-802808ad278f.png)
6. Then complement
![image](https://user-images.githubusercontent.com/10054719/80662594-3b387400-8ac4-11ea-96b1-75fa364ca167.png)
7. The result is as follows: the date printed is always just the start date
![image](https://user-images.githubusercontent.com/10054719/80662665-67ec8b80-8ac4-11ea-9ae2-d082087efe6b.png)
![image](https://user-images.githubusercontent.com/10054719/80662670-6d49d600-8ac4-11ea-8487-608327a30654.png)
![image](https://user-images.githubusercontent.com/10054719/80662674-70dd5d00-8ac4-11ea-914d-096c12cbfb9c.png)
8. The schedule time recorded for the main process itself is correct
![image](https://user-images.githubusercontent.com/10054719/80662741-9bc7b100-8ac4-11ea-9d4d-e05c54ca5a86.png)
**Which version of Dolphin Scheduler:**
-[1.2.1]
- Stand-alone deployment
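A likely cause, judging from the attached ProcessService code (an observation, not a confirmed diagnosis): when the sub-process command carries no explicit schedule time, getScheduleTime() falls back to the complement start date stored in the command parameters, so every sub-process run in the complement range ends up with the same date:

    private Date getScheduleTime(Command command, Map<String, String> cmdParam){
        Date scheduleTime = command.getScheduleTime();
        if(scheduleTime == null){
            if(cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)){
                scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
            }
        }
        return scheduleTime;
    }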
| https://github.com/apache/dolphinscheduler/issues/2581 | https://github.com/apache/dolphinscheduler/pull/2656 | 050dae7ca4a1b6243fbc0dd9c4018d45e1655566 | d54aab5cd530b2e135a58fbd6b22564583b5abb8 | "2020-04-30T01:27:32Z" | java | "2020-05-09T07:03:02Z" | dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.service.process;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.cronutils.model.Cron;
import org.apache.commons.lang.ArrayUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.model.DateInterval;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.quartz.CronExpression;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import java.util.*;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.toSet;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* process relative dao that some mappers in this.
*/
@Component
public class ProcessService {
private final Logger logger = LoggerFactory.getLogger(getClass());
private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
ExecutionStatus.RUNNING_EXEUTION.ordinal(),
ExecutionStatus.READY_PAUSE.ordinal(),
ExecutionStatus.READY_STOP.ordinal()};
@Autowired
private UserMapper userMapper;
@Autowired
private ProcessDefinitionMapper processDefineMapper;
@Autowired
private ProcessInstanceMapper processInstanceMapper;
@Autowired
private DataSourceMapper dataSourceMapper;
@Autowired
private ProcessInstanceMapMapper processInstanceMapMapper;
@Autowired
private TaskInstanceMapper taskInstanceMapper;
@Autowired
private CommandMapper commandMapper;
@Autowired
private ScheduleMapper scheduleMapper;
@Autowired
private UdfFuncMapper udfFuncMapper;
@Autowired
private ResourceMapper resourceMapper;
@Autowired
private ErrorCommandMapper errorCommandMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private ProjectMapper projectMapper;
/**
* handle Command (construct ProcessInstance from Command) , wrapped in transaction
* @param logger logger
* @param host host
* @param validThreadNum validThreadNum
* @param command found command
* @return process instance
*/
@Transactional(rollbackFor = Exception.class)
public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
ProcessInstance processInstance = constructProcessInstance(command, host);
//cannot construct process instance, return null;
if(processInstance == null){
logger.error("scan command, command parameter is error: {}", command);
moveToErrorCommand(command, "process instance is null");
return null;
}
if(!checkThreadNum(command, validThreadNum)){
logger.info("there is not enough thread for this command: {}", command);
return setWaitingThreadProcess(command, processInstance);
}
if (processInstance.getCommandType().equals(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS)){
delCommandByid(command.getId());
return null;
}
processInstance.setCommandType(command.getCommandType());
processInstance.addHistoryCmd(command.getCommandType());
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
delCommandByid(command.getId());
return processInstance;
}
/**
* save error command, and delete original command
* @param command command
* @param message message
*/
@Transactional(rollbackFor = Exception.class)
public void moveToErrorCommand(Command command, String message) {
ErrorCommand errorCommand = new ErrorCommand(command, message);
this.errorCommandMapper.insert(errorCommand);
delCommandByid(command.getId());
}
/**
* set process waiting thread
* @param command command
* @param processInstance processInstance
* @return process instance
*/
private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
processInstance.setState(ExecutionStatus.WAITTING_THREAD);
if(command.getCommandType() != CommandType.RECOVER_WAITTING_THREAD){
processInstance.addHistoryCmd(command.getCommandType());
}
saveProcessInstance(processInstance);
this.setSubProcessParam(processInstance);
createRecoveryWaitingThreadCommand(command, processInstance);
return null;
}
/**
* check thread num
* @param command command
* @param validThreadNum validThreadNum
* @return if thread is enough
*/
private boolean checkThreadNum(Command command, int validThreadNum) {
int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionId());
return validThreadNum >= commandThreadCount;
}
/**
* insert one command
* @param command command
* @return create result
*/
public int createCommand(Command command) {
int result = 0;
if (command != null){
result = commandMapper.insert(command);
}
return result;
}
/**
* find one command from queue list
* @return command
*/
public Command findOneCommand(){
return commandMapper.getOneToRun();
}
/**
* check the input command exists in queue list
* @param command command
* @return create command result
*/
public Boolean verifyIsNeedCreateCommand(Command command){
Boolean isNeedCreate = true;
Map<CommandType,Integer> cmdTypeMap = new HashMap<CommandType,Integer>();
cmdTypeMap.put(CommandType.REPEAT_RUNNING,1);
cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS,1);
cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS,1);
CommandType commandType = command.getCommandType();
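        // for these command types, skip creating a new command if one targeting the same process instance is already queued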
if(cmdTypeMap.containsKey(commandType)){
JSONObject cmdParamObj = (JSONObject) JSON.parse(command.getCommandParam());
JSONObject tempObj;
int processInstanceId = cmdParamObj.getInteger(CMDPARAM_RECOVER_PROCESS_ID_STRING);
List<Command> commands = commandMapper.selectList(null);
// for all commands
for (Command tmpCommand:commands){
if(cmdTypeMap.containsKey(tmpCommand.getCommandType())){
tempObj = (JSONObject) JSON.parse(tmpCommand.getCommandParam());
if(tempObj != null && processInstanceId == tempObj.getInteger(CMDPARAM_RECOVER_PROCESS_ID_STRING)){
isNeedCreate = false;
break;
}
}
}
}
return isNeedCreate;
}
/**
* find process instance detail by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceDetailById(int processId){
return processInstanceMapper.queryDetailById(processId);
}
/**
* get task node list by definitionId
* @param defineId
* @return
*/
public List<TaskNode> getTaskNodeListByDefinitionId(Integer defineId){
ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
if (processDefinition == null) {
logger.info("process define not exists");
return null;
}
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
if (null == processData) {
logger.error("process data is null");
return null;
}
return processData.getTasks();
}
/**
* find process instance by id
* @param processId processId
* @return process instance
*/
public ProcessInstance findProcessInstanceById(int processId){
return processInstanceMapper.selectById(processId);
}
/**
* find process define by id.
* @param processDefinitionId processDefinitionId
* @return process definition
*/
public ProcessDefinition findProcessDefineById(int processDefinitionId) {
return processDefineMapper.selectById(processDefinitionId);
}
/**
* delete work process instance by id
* @param processInstanceId processInstanceId
* @return delete process instance result
*/
public int deleteWorkProcessInstanceById(int processInstanceId){
return processInstanceMapper.deleteById(processInstanceId);
}
/**
* delete all sub process by parent instance id
* @param processInstanceId processInstanceId
* @return delete all sub process instance result
*/
public int deleteAllSubWorkProcessByParentId(int processInstanceId){
List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
for(Integer subId : subProcessIdList ){
deleteAllSubWorkProcessByParentId(subId);
deleteWorkProcessMapByParentId(subId);
deleteWorkProcessInstanceById(subId);
}
return 1;
}
/**
* calculate sub process number in the process define.
* @param processDefinitionId processDefinitionId
* @return process thread num count
*/
private Integer workProcessThreadNumCount(Integer processDefinitionId){
List<Integer> ids = new ArrayList<>();
recurseFindSubProcessId(processDefinitionId, ids);
return ids.size()+1;
}
/**
* recursive query sub process definition id by parent id.
* @param parentId parentId
* @param ids ids
*/
public void recurseFindSubProcessId(int parentId, List<Integer> ids){
ProcessDefinition processDefinition = processDefineMapper.selectById(parentId);
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
List<TaskNode> taskNodeList = processData.getTasks();
if (taskNodeList != null && taskNodeList.size() > 0){
for (TaskNode taskNode : taskNodeList){
String parameter = taskNode.getParams();
if (parameter.contains(CMDPARAM_SUB_PROCESS_DEFINE_ID)){
SubProcessParameters subProcessParam = JSON.parseObject(parameter, SubProcessParameters.class);
ids.add(subProcessParam.getProcessDefinitionId());
recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(),ids);
}
}
}
}
/**
* create recovery waiting thread command when thread pool is not enough for the process instance.
* sub work process instance need not to create recovery command.
* create recovery waiting thread command and delete origin command at the same time.
* if the recovery command is exists, only update the field update_time
* @param originCommand originCommand
* @param processInstance processInstance
*/
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // a sub process does not need to create a waiting thread command
if(processInstance.getIsSubProcess() == Flag.YES){
if(originCommand != null){
commandMapper.deleteById(originCommand.getId());
}
return;
}
Map<String, String> cmdParam = new HashMap<>();
cmdParam.put(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD, String.valueOf(processInstance.getId()));
        // the process instance previously quit in the "waiting thread" state
if(originCommand == null){
Command command = new Command(
CommandType.RECOVER_WAITTING_THREAD,
processInstance.getTaskDependType(),
processInstance.getFailureStrategy(),
processInstance.getExecutorId(),
processInstance.getProcessDefinitionId(),
JSONUtils.toJson(cmdParam),
processInstance.getWarningType(),
processInstance.getWarningGroupId(),
processInstance.getScheduleTime(),
processInstance.getProcessInstancePriority()
);
saveCommand(command);
return ;
}
        // only update the command time if the current command is a recover-from-waiting-thread command
if(originCommand.getCommandType() == CommandType.RECOVER_WAITTING_THREAD){
originCommand.setUpdateTime(new Date());
saveCommand(originCommand);
}else{
// delete old command and create new waiting thread command
commandMapper.deleteById(originCommand.getId());
originCommand.setId(0);
originCommand.setCommandType(CommandType.RECOVER_WAITTING_THREAD);
originCommand.setUpdateTime(new Date());
originCommand.setCommandParam(JSONUtils.toJson(cmdParam));
originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
saveCommand(originCommand);
}
}
/**
* get schedule time from command
* @param command command
* @param cmdParam cmdParam map
* @return date
*/
private Date getScheduleTime(Command command, Map<String, String> cmdParam){
Date scheduleTime = command.getScheduleTime();
if(scheduleTime == null){
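            // no schedule time on the command itself: fall back to the complement start date carried in the command parameters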
if(cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)){
scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
}
}
return scheduleTime;
}
/**
* generate a new work process instance from command.
* @param processDefinition processDefinition
* @param command command
* @param cmdParam cmdParam map
* @return process instance
*/
private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
Command command,
Map<String, String> cmdParam){
ProcessInstance processInstance = new ProcessInstance(processDefinition);
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setRecovery(Flag.NO);
processInstance.setStartTime(new Date());
processInstance.setRunTimes(1);
processInstance.setMaxTryTimes(0);
processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
processInstance.setCommandParam(command.getCommandParam());
processInstance.setCommandType(command.getCommandType());
processInstance.setIsSubProcess(Flag.NO);
processInstance.setTaskDependType(command.getTaskDependType());
processInstance.setFailureStrategy(command.getFailureStrategy());
processInstance.setExecutorId(command.getExecutorId());
WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
processInstance.setWarningType(warningType);
Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
processInstance.setWarningGroupId(warningGroupId);
// schedule time
Date scheduleTime = getScheduleTime(command, cmdParam);
if(scheduleTime != null){
processInstance.setScheduleTime(scheduleTime);
}
processInstance.setCommandStartTime(command.getStartTime());
processInstance.setLocations(processDefinition.getLocations());
processInstance.setConnects(processDefinition.getConnects());
// curing global params
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
getCommandTypeIfComplement(processInstance, command),
processInstance.getScheduleTime()));
//copy process define json to process instance
processInstance.setProcessInstanceJson(processDefinition.getProcessDefinitionJson());
// set process instance priority
processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
processInstance.setWorkerGroup(workerGroup);
processInstance.setTimeout(processDefinition.getTimeout());
processInstance.setTenantId(processDefinition.getTenantId());
return processInstance;
}
/**
* get process tenant
* if there is a tenant id in the definition, use the tenant of the definition.
* if there is no tenant id in the definition, or that tenant does not exist,
* use the definition creator's tenant.
* @param tenantId tenantId
* @param userId userId
* @return tenant
*/
public Tenant getTenantForProcess(int tenantId, int userId){
Tenant tenant = null;
if(tenantId >= 0){
tenant = tenantMapper.queryById(tenantId);
}
if (userId == 0){
return null;
}
if(tenant == null){
User user = userMapper.selectById(userId);
tenant = tenantMapper.queryById(user.getTenantId());
}
return tenant;
}
/**
* check command parameters is valid
* @param command command
* @param cmdParam cmdParam map
* @return whether command param is valid
*/
private Boolean checkCmdParam(Command command, Map<String, String> cmdParam){
if(command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType()== TaskDependType.TASK_PRE){
if(cmdParam == null
|| !cmdParam.containsKey(Constants.CMDPARAM_START_NODE_NAMES)
|| cmdParam.get(Constants.CMDPARAM_START_NODE_NAMES).isEmpty()){
logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
return false;
}
}
return true;
}
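// Illustrative note: for TASK_ONLY / TASK_PRE commands the command param JSON must carry the
// CMDPARAM_START_NODE_NAMES key with a non-empty value (e.g. a comma-separated node name list
// such as "shell_node_1,shell_node_2"); otherwise checkCmdParam rejects the command.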
/**
* construct process instance according to one command.
* @param command command
* @param host host
* @return process instance
*/
private ProcessInstance constructProcessInstance(Command command, String host){
ProcessInstance processInstance = null;
CommandType commandType = command.getCommandType();
Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
ProcessDefinition processDefinition = null;
if(command.getProcessDefinitionId() != 0){
processDefinition = processDefineMapper.selectById(command.getProcessDefinitionId());
if(processDefinition == null){
logger.error("cannot find the work process define! define id : {}", command.getProcessDefinitionId());
return null;
}
}
if(cmdParam != null ){
Integer processInstanceId = 0;
// recover from failure or pause tasks
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING)) {
String processId = cmdParam.get(Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING);
processInstanceId = Integer.parseInt(processId);
if (processInstanceId == 0) {
logger.error("command parameter is error, [ ProcessInstanceId ] is 0");
return null;
}
}else if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
// sub process map
String pId = cmdParam.get(Constants.CMDPARAM_SUB_PROCESS);
processInstanceId = Integer.parseInt(pId);
}else if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD)){
// waiting thread command
String pId = cmdParam.get(Constants.CMDPARAM_RECOVERY_WAITTING_THREAD);
processInstanceId = Integer.parseInt(pId);
}
if(processInstanceId ==0){
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}else{
processInstance = this.findProcessInstanceDetailById(processInstanceId);
}
processDefinition = processDefineMapper.selectById(processInstance.getProcessDefinitionId());
processInstance.setProcessDefinition(processDefinition);
//reset command parameter
if(processInstance.getCommandParam() != null){
Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
for(Map.Entry<String, String> entry: processCmdParam.entrySet()) {
if(!cmdParam.containsKey(entry.getKey())){
cmdParam.put(entry.getKey(), entry.getValue());
}
}
}
// reset command parameter if sub process
if(cmdParam.containsKey(Constants.CMDPARAM_SUB_PROCESS)){
processInstance.setCommandParam(command.getCommandParam());
}
}else{
// generate one new process instance
processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
}
if(!checkCmdParam(command, cmdParam)){
logger.error("command parameter check failed!");
return null;
}
if(command.getScheduleTime() != null){
processInstance.setScheduleTime(command.getScheduleTime());
}
processInstance.setHost(host);
ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXEUTION;
int runTime = processInstance.getRunTimes();
switch (commandType){
case START_PROCESS:
break;
case START_FAILURE_TASK_PROCESS:
// find failed tasks and init these tasks
List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
failedList.addAll(killedList);
failedList.addAll(toleranceList);
for(Integer taskId : failedList){
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING,
String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
processInstance.setRunTimes(runTime +1 );
break;
case START_CURRENT_TASK_PROCESS:
break;
case RECOVER_WAITTING_THREAD:
break;
case RECOVER_SUSPENDED_PROCESS:
// find pause tasks and init task's state
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(),
ExecutionStatus.KILL);
suspendedNodeList.addAll(stopNodeList);
for(Integer taskId : suspendedNodeList){
// initialize the pause state
initTaskInstance(this.findTaskInstanceById(taskId));
}
cmdParam.put(Constants.CMDPARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
processInstance.setRunTimes(runTime +1);
break;
case RECOVER_TOLERANCE_FAULT_PROCESS:
// recover tolerance fault process
processInstance.setRecovery(Flag.YES);
runStatus = processInstance.getState();
break;
case COMPLEMENT_DATA:
// delete all the valid tasks when complement data
List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
taskInstance.setFlag(Flag.NO);
this.updateTaskInstance(taskInstance);
}
break;
case REPEAT_RUNNING:
// delete the recover task names from command parameter
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
// delete all the valid tasks when repeat running
List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : validTaskList){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
}
processInstance.setStartTime(new Date());
processInstance.setEndTime(null);
processInstance.setRunTimes(runTime +1);
initComplementDataParam(processDefinition, processInstance, cmdParam);
break;
case SCHEDULER:
break;
default:
break;
}
processInstance.setState(runStatus);
return processInstance;
}
/**
* return complement data if the process start with complement data
* @param processInstance processInstance
* @param command command
* @return command type
*/
private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command){
if(CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()){
return CommandType.COMPLEMENT_DATA;
}else{
return command.getCommandType();
}
}
/**
* initialize complement data parameters
* @param processDefinition processDefinition
* @param processInstance processInstance
* @param cmdParam cmdParam
*/
private void initComplementDataParam(ProcessDefinition processDefinition,
ProcessInstance processInstance,
Map<String, String> cmdParam) {
if(!processInstance.isComplementData()){
return;
}
Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
YYYY_MM_DD_HH_MM_SS);
processInstance.setScheduleTime(startComplementTime);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processDefinition.getGlobalParamMap(),
processDefinition.getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
}
/**
* set sub work process parameters:
* handle the sub work process instance, update the relation table and command parameters,
* set the sub work process flag, and extend the parent work process command parameters
* @param subProcessInstance subProcessInstance
* @return process instance
*/
public ProcessInstance setSubProcessParam(ProcessInstance subProcessInstance){
String cmdParam = subProcessInstance.getCommandParam();
if(StringUtils.isEmpty(cmdParam)){
return subProcessInstance;
}
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
// write sub process id into cmd param.
if(paramMap.containsKey(CMDPARAM_SUB_PROCESS)
&& CMDPARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMDPARAM_SUB_PROCESS))){
paramMap.remove(CMDPARAM_SUB_PROCESS);
paramMap.put(CMDPARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
subProcessInstance.setCommandParam(JSONUtils.toJson(paramMap));
subProcessInstance.setIsSubProcess(Flag.YES);
this.saveProcessInstance(subProcessInstance);
}
// copy parent instance user-defined params to the sub process.
String parentInstanceId = paramMap.get(CMDPARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
if(StringUtils.isNotEmpty(parentInstanceId)){
ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
if(parentInstance != null){
subProcessInstance.setGlobalParams(
joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
this.saveProcessInstance(subProcessInstance);
}else{
logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
}
}
ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
if(processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0){
return subProcessInstance;
}
// update sub process id to process map table
processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
this.updateWorkProcessInstanceMap(processInstanceMap);
return subProcessInstance;
}
/**
* join parent global params into sub process.
* only keys not already present in the sub process global params are joined.
* @param parentGlobalParams parentGlobalParams
* @param subGlobalParams subGlobalParams
* @return global params join
*/
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams){
List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
Map<String,String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
for(Property parent : parentPropertyList){
if(!subMap.containsKey(parent.getProp())){
subPropertyList.add(parent);
}
}
return JSONUtils.toJson(subPropertyList);
}
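// Illustrative example (assumed property values): joining parent params
// [{"prop":"dt","value":"2020-05-01"},{"prop":"env","value":"prod"}] into sub params
// [{"prop":"env","value":"test"}] keeps the sub value for "env" and only appends the missing
// parent key, yielding [{"prop":"env","value":"test"},{"prop":"dt","value":"2020-05-01"}].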
/**
* initialize task instance
* @param taskInstance taskInstance
*/
private void initTaskInstance(TaskInstance taskInstance){
if(!taskInstance.isSubProcess()){
if(taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure()){
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
return;
}
}
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateTaskInstance(taskInstance);
}
/**
* submit task to db
* submit sub process to command
* @param taskInstance taskInstance
* @return task instance
*/
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance){
ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
logger.info("start submit task : {}, instance id:{}, state: {}",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
//submit to db
TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
if(task == null){
logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
return task;
}
if(!task.getState().typeIsFinished()){
createSubWorkProcessCommand(processInstance, task);
}
logger.info("end submit task to db successfully:{} state:{} complete, instance id:{} state: {} ",
taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
return task;
}
/**
* set work process instance map
* @param parentInstance parentInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask){
ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
if(processMap != null){
return processMap;
}else if(parentInstance.getCommandType() == CommandType.REPEAT_RUNNING
|| parentInstance.isComplementData()){
// update current task id to map
// repeat running does not generate new sub process instance
processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
if(processMap!= null){
processMap.setParentTaskInstanceId(parentTask.getId());
updateWorkProcessInstanceMap(processMap);
return processMap;
}
}
// new task
processMap = new ProcessInstanceMap();
processMap.setParentProcessInstanceId(parentInstance.getId());
processMap.setParentTaskInstanceId(parentTask.getId());
createWorkProcessInstanceMap(processMap);
return processMap;
}
/**
* find previous task work process map.
* @param parentProcessInstance parentProcessInstance
* @param parentTask parentTask
* @return process instance map
*/
private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
TaskInstance parentTask) {
Integer preTaskId = 0;
List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
for(TaskInstance task : preTaskList){
if(task.getName().equals(parentTask.getName())){
preTaskId = task.getId();
ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
if(map!=null){
return map;
}
}
}
logger.info("sub process instance is not found,parent task:{},parent instance:{}",
parentTask.getId(), parentProcessInstance.getId());
return null;
}
/**
* create sub work process command
* @param parentProcessInstance parentProcessInstance
* @param task task
*/
private void createSubWorkProcessCommand(ProcessInstance parentProcessInstance,
TaskInstance task){
if(!task.isSubProcess()){
return;
}
ProcessInstanceMap instanceMap = setProcessInstanceMap(parentProcessInstance, task);
TaskNode taskNode = JSONUtils.parseObject(task.getTaskJson(), TaskNode.class);
Map<String, String> subProcessParam = JSONUtils.toMap(taskNode.getParams());
Integer childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMDPARAM_SUB_PROCESS_DEFINE_ID));
ProcessInstance childInstance = findSubProcessInstance(parentProcessInstance.getId(), task.getId());
CommandType fatherType = parentProcessInstance.getCommandType();
CommandType commandType = fatherType;
if(childInstance == null || commandType == CommandType.REPEAT_RUNNING){
String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
// the sub process must begin with schedule/complement data
// if the father process began with scheduler/complement data
if(fatherHistoryCommand.startsWith(CommandType.SCHEDULER.toString()) ||
fatherHistoryCommand.startsWith(CommandType.COMPLEMENT_DATA.toString())){
commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
}
}
if(childInstance != null){
childInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
updateProcessInstance(childInstance);
}
// set sub work process command
String processMapStr = JSONUtils.toJson(instanceMap);
Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
if(commandType == CommandType.COMPLEMENT_DATA ||
(childInstance != null && childInstance.isComplementData())){
Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
processMapStr = JSONUtils.toJson(cmdParam);
}
updateSubProcessDefinitionByParent(parentProcessInstance, childDefineId);
Command command = new Command();
command.setWarningType(parentProcessInstance.getWarningType());
command.setWarningGroupId(parentProcessInstance.getWarningGroupId());
command.setFailureStrategy(parentProcessInstance.getFailureStrategy());
command.setProcessDefinitionId(childDefineId);
command.setScheduleTime(parentProcessInstance.getScheduleTime());
command.setExecutorId(parentProcessInstance.getExecutorId());
command.setCommandParam(processMapStr);
command.setCommandType(commandType);
command.setProcessInstancePriority(parentProcessInstance.getProcessInstancePriority());
createCommand(command);
logger.info("sub process command created: {} ", command.toString());
}
/**
* update sub process definition
* @param parentProcessInstance parentProcessInstance
* @param childDefinitionId childDefinitionId
*/
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, int childDefinitionId) {
ProcessDefinition fatherDefinition = this.findProcessDefineById(parentProcessInstance.getProcessDefinitionId());
ProcessDefinition childDefinition = this.findProcessDefineById(childDefinitionId);
if(childDefinition != null && fatherDefinition != null){
childDefinition.setReceivers(fatherDefinition.getReceivers());
childDefinition.setReceiversCc(fatherDefinition.getReceiversCc());
processDefineMapper.updateById(childDefinition);
}
}
/**
* submit task to mysql
* @param taskInstance taskInstance
* @param processInstance processInstance
* @return task instance
*/
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance){
ExecutionStatus processInstanceState = processInstance.getState();
if(taskInstance.getState().typeIsFailure()){
if(taskInstance.isSubProcess()){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}else {
if( processInstanceState != ExecutionStatus.READY_STOP
&& processInstanceState != ExecutionStatus.READY_PAUSE){
// failure task set invalid
taskInstance.setFlag(Flag.NO);
updateTaskInstance(taskInstance);
// create a new task instance
if(taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE){
taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1 );
}
taskInstance.setEndTime(null);
taskInstance.setStartTime(new Date());
taskInstance.setFlag(Flag.YES);
taskInstance.setHost(null);
taskInstance.setId(0);
}
}
}
taskInstance.setExecutorId(processInstance.getExecutorId());
taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
taskInstance.setSubmitTime(new Date());
boolean saveResult = saveTaskInstance(taskInstance);
if(!saveResult){
return null;
}
return taskInstance;
}
/**
* ${processInstancePriority}_${processInstanceId}_${taskInstancePriority}_${taskInstanceId}_${task executed by ip1},${ip2}...
* The tasks with the highest priority are selected by comparing the priorities of the above four levels from high to low.
* @param taskInstance taskInstance
* @return task zk queue str
*/
public String taskZkInfo(TaskInstance taskInstance) {
String taskWorkerGroup = getTaskWorkerGroup(taskInstance);
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if(processInstance == null){
logger.error("process instance is null. please check the task info, task id: " + taskInstance.getId());
return "";
}
StringBuilder sb = new StringBuilder(100);
sb.append(processInstance.getProcessInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getProcessInstanceId()).append(Constants.UNDERLINE)
.append(taskInstance.getTaskInstancePriority().ordinal()).append(Constants.UNDERLINE)
.append(taskInstance.getId()).append(Constants.UNDERLINE)
.append(taskInstance.getWorkerGroup());
return sb.toString();
}
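// Illustrative output (assumed ids and priority ordinals): a process instance 100 with MEDIUM(2)
// priority, task instance 200 with HIGH(1) priority and worker group "default" would produce
// the queue string "2_100_1_200_default".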
/**
* get submit task instance state by the work process state
* cannot modify the task state when it is running/kill/submit success, or when this
* task instance already exists in the task queue.
* return pause if work process state is ready pause
* return stop if work process state is ready stop
* if all of above are not satisfied, return submit success
*
* @param taskInstance taskInstance
* @param processInstanceState processInstanceState
* @return process instance state
*/
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState){
ExecutionStatus state = taskInstance.getState();
if(
// running or killed
// the task already exists in task queue
// return state
state == ExecutionStatus.RUNNING_EXEUTION
|| state == ExecutionStatus.KILL
|| checkTaskExistsInTaskQueue(taskInstance)
){
return state;
}
// return pause/stop if the process instance state is ready pause / ready stop
// or return submit success
if( processInstanceState == ExecutionStatus.READY_PAUSE){
state = ExecutionStatus.PAUSE;
}else if(processInstanceState == ExecutionStatus.READY_STOP
|| !checkProcessStrategy(taskInstance)) {
state = ExecutionStatus.KILL;
}else{
state = ExecutionStatus.SUBMITTED_SUCCESS;
}
return state;
}
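// Worked example (illustrative): a freshly submitted task under a READY_PAUSE process instance
// is returned as PAUSE; under READY_STOP, or when the failure strategy is not CONTINUE and
// another task has already failed, it is returned as KILL; a task that is already
// RUNNING_EXEUTION or KILL (or queued) keeps its current state.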
/**
* check process instance strategy
* @param taskInstance taskInstance
* @return check strategy result
*/
private boolean checkProcessStrategy(TaskInstance taskInstance){
ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
FailureStrategy failureStrategy = processInstance.getFailureStrategy();
if(failureStrategy == FailureStrategy.CONTINUE){
return true;
}
List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
for(TaskInstance task : taskInstances){
if(task.getState() == ExecutionStatus.FAILURE){
return false;
}
}
return true;
}
/**
* check whether the task instance exists in the task queue
* @param taskInstance taskInstance
* @return whether the task instance exists in the queue
*/
public boolean checkTaskExistsInTaskQueue(TaskInstance taskInstance){
if(taskInstance.isSubProcess()){
return false;
}
String taskZkInfo = taskZkInfo(taskInstance);
return false;
}
/**
* create a new process instance
* @param processInstance processInstance
*/
public void createProcessInstance(ProcessInstance processInstance){
if (processInstance != null){
processInstanceMapper.insert(processInstance);
}
}
/**
* insert or update work process instance in the database
* @param processInstance processInstance
*/
public void saveProcessInstance(ProcessInstance processInstance){
if (processInstance == null){
logger.error("save error, process instance is null!");
return ;
}
if(processInstance.getId() != 0){
processInstanceMapper.updateById(processInstance);
}else{
createProcessInstance(processInstance);
}
}
/**
* insert or update command
* @param command command
* @return save command result
*/
public int saveCommand(Command command){
if(command.getId() != 0){
return commandMapper.updateById(command);
}else{
return commandMapper.insert(command);
}
}
/**
* insert or update task instance
* @param taskInstance taskInstance
* @return save task instance result
*/
public boolean saveTaskInstance(TaskInstance taskInstance){
if(taskInstance.getId() != 0){
return updateTaskInstance(taskInstance);
}else{
return createTaskInstance(taskInstance);
}
}
/**
* insert task instance
* @param taskInstance taskInstance
* @return create task instance result
*/
public boolean createTaskInstance(TaskInstance taskInstance) {
int count = taskInstanceMapper.insert(taskInstance);
return count > 0;
}
/**
* update task instance
* @param taskInstance taskInstance
* @return update task instance result
*/
public boolean updateTaskInstance(TaskInstance taskInstance){
int count = taskInstanceMapper.updateById(taskInstance);
return count > 0;
}
/**
* delete a command by id
* @param id id
*/
public void delCommandByid(int id) {
commandMapper.deleteById(id);
}
/**
* find task instance by id
* @param taskId task id
* @return task instance
*/
public TaskInstance findTaskInstanceById(Integer taskId){
return taskInstanceMapper.selectById(taskId);
}
/**
* package task instance, associate processInstance and processDefine
* @param taskInstId taskInstId
* @return task instance
*/
public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId){
// get task instance
TaskInstance taskInstance = findTaskInstanceById(taskInstId);
if(taskInstance == null){
return taskInstance;
}
// get process instance
ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
// get process define
ProcessDefinition processDefine = findProcessDefineById(taskInstance.getProcessDefinitionId());
taskInstance.setProcessInstance(processInstance);
taskInstance.setProcessDefine(processDefine);
return taskInstance;
}
/**
* get id list by task state
* @param instanceId instanceId
* @param state state
* @return task instance states
*/
public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state){
return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
}
/**
* find valid task list by process definition id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
}
/**
* find previous task list by work process id
* @param processInstanceId processInstanceId
* @return task instance list
*/
public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId){
return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
}
/**
* update work process instance map
* @param processInstanceMap processInstanceMap
* @return update process instance result
*/
public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
return processInstanceMapMapper.updateById(processInstanceMap);
}
/**
* create work process instance map
* @param processInstanceMap processInstanceMap
* @return create process instance result
*/
public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap){
Integer count = 0;
if(processInstanceMap !=null){
return processInstanceMapMapper.insert(processInstanceMap);
}
return count;
}
/**
* find work process map by parent process id and parent task id.
* @param parentWorkProcessId parentWorkProcessId
* @param parentTaskId parentTaskId
* @return process instance map
*/
public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId){
return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
}
/**
* delete work process map by parent process id
* @param parentWorkProcessId parentWorkProcessId
* @return delete process map result
*/
public int deleteWorkProcessMapByParentId(int parentWorkProcessId){
return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
}
/**
* find sub process instance
* @param parentProcessId parentProcessId
* @param parentTaskId parentTaskId
* @return process instance
*/
public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId){
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
return processInstance;
}
/**
* find parent process instance
* @param subProcessId subProcessId
* @return process instance
*/
public ProcessInstance findParentProcessInstance(Integer subProcessId) {
ProcessInstance processInstance = null;
ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
if(processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0){
return processInstance;
}
processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
return processInstance;
}
/**
* change task state
* @param state state
* @param startTime startTime
* @param host host
* @param executePath executePath
* @param logPath logPath
* @param taskInstId taskInstId
*/
public void changeTaskState(ExecutionStatus state, Date startTime, String host,
String executePath,
String logPath,
int taskInstId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setState(state);
taskInstance.setStartTime(startTime);
taskInstance.setHost(host);
taskInstance.setExecutePath(executePath);
taskInstance.setLogPath(logPath);
saveTaskInstance(taskInstance);
}
/**
* update process instance
* @param processInstance processInstance
* @return update process instance result
*/
public int updateProcessInstance(ProcessInstance processInstance){
return processInstanceMapper.updateById(processInstance);
}
/**
* update the process instance
* @param processInstanceId processInstanceId
* @param processJson processJson
* @param globalParams globalParams
* @param scheduleTime scheduleTime
* @param flag flag
* @param locations locations
* @param connects connects
* @return update process instance result
*/
public int updateProcessInstance(Integer processInstanceId, String processJson,
String globalParams, Date scheduleTime, Flag flag,
String locations, String connects){
ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
if(processInstance!= null){
processInstance.setProcessInstanceJson(processJson);
processInstance.setGlobalParams(globalParams);
processInstance.setScheduleTime(scheduleTime);
processInstance.setLocations(locations);
processInstance.setConnects(connects);
return processInstanceMapper.updateById(processInstance);
}
return 0;
}
/**
* change task state
* @param state state
* @param endTime endTime
* @param processId processId
* @param appIds appIds
* @param taskInstId taskInstId
*/
public void changeTaskState(ExecutionStatus state,
Date endTime,
int processId,
String appIds,
int taskInstId) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setPid(processId);
taskInstance.setAppLink(appIds);
taskInstance.setState(state);
taskInstance.setEndTime(endTime);
saveTaskInstance(taskInstance);
}
/**
* convert integer list to string list
* @param intList intList
* @return string list
*/
public List<String> convertIntListToString(List<Integer> intList){
if(intList == null){
return new ArrayList<>();
}
List<String> result = new ArrayList<String>(intList.size());
for(Integer intVar : intList){
result.add(String.valueOf(intVar));
}
return result;
}
/**
* update pid and app links field by task instance id
* @param taskInstId taskInstId
* @param pid pid
* @param appLinks appLinks
*/
public void updatePidByTaskInstId(int taskInstId, int pid,String appLinks) {
TaskInstance taskInstance = taskInstanceMapper.selectById(taskInstId);
taskInstance.setPid(pid);
taskInstance.setAppLink(appLinks);
saveTaskInstance(taskInstance);
}
/**
* query schedule by id
* @param id id
* @return schedule
*/
public Schedule querySchedule(int id) {
return scheduleMapper.selectById(id);
}
/**
* query Schedule by processDefinitionId
* @param processDefinitionId processDefinitionId
* @see Schedule
*/
public List<Schedule> queryReleaseSchedulerListByProcessDefinitionId(int processDefinitionId) {
return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
}
/**
* query need failover process instance
* @param host host
* @return process instance list
*/
public List<ProcessInstance> queryNeedFailoverProcessInstances(String host){
return processInstanceMapper.queryByHostAndStatus(host, stateArray);
}
/**
* process need failover process instance
* @param processInstance processInstance
*/
@Transactional(rollbackFor = Exception.class)
public void processNeedFailoverProcessInstances(ProcessInstance processInstance){
// 1. set the processInstance host to "null"
processInstance.setHost("null");
processInstanceMapper.updateById(processInstance);
// 2. insert a recover command
Command cmd = new Command();
cmd.setProcessDefinitionId(processInstance.getProcessDefinitionId());
cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMDPARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
cmd.setExecutorId(processInstance.getExecutorId());
cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
createCommand(cmd);
}
/**
* query all need failover task instances by host
* @param host host
* @return task instance list
*/
public List<TaskInstance> queryNeedFailoverTaskInstances(String host){
return taskInstanceMapper.queryByHostAndStatus(host,
stateArray);
}
/**
* find data source by id
* @param id id
* @return datasource
*/
public DataSource findDataSourceById(int id){
return dataSourceMapper.selectById(id);
}
/**
* update process instance state by id
* @param processInstanceId processInstanceId
* @param executionStatus executionStatus
* @return update process result
*/
public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) {
ProcessInstance instance = processInstanceMapper.selectById(processInstanceId);
instance.setState(executionStatus);
return processInstanceMapper.updateById(instance);
}
/**
* find process instance by the task id
* @param taskId taskId
* @return process instance
*/
public ProcessInstance findProcessInstanceByTaskId(int taskId){
TaskInstance taskInstance = taskInstanceMapper.selectById(taskId);
if(taskInstance!= null){
return processInstanceMapper.selectById(taskInstance.getProcessInstanceId());
}
return null;
}
/**
* find udf function list by id list string
* @param ids ids
* @return udf function list
*/
public List<UdfFunc> queryUdfFunListByids(int[] ids){
return udfFuncMapper.queryUdfByIdStr(ids, null);
}
/**
* find tenant code by resource name
* @param resName resource name
* @param resourceType resource type
* @return tenant code
*/
public String queryTenantCodeByResName(String resName,ResourceType resourceType){
return resourceMapper.queryTenantCodeByResourceName(resName, resourceType.ordinal());
}
/**
* find schedule list by process define id.
* @param ids ids
* @return schedule list
*/
public List<Schedule> selectAllByProcessDefineId(int[] ids){
return scheduleMapper.selectAllByProcessDefineArray(
ids);
}
/**
* get dependency cycle by work process define id and scheduler fire time
* @param masterId masterId
* @param processDefinitionId processDefinitionId
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency
* @throws Exception if error throws Exception
*/
public CycleDependency getCycleDependency(int masterId, int processDefinitionId, Date scheduledFireTime) throws Exception {
List<CycleDependency> list = getCycleDependencies(masterId,new int[]{processDefinitionId},scheduledFireTime);
return list.size()>0 ? list.get(0) : null;
}
/**
* get dependency cycle list by work process define id list and scheduler fire time
* @param masterId masterId
* @param ids ids
* @param scheduledFireTime the time the task schedule is expected to trigger
* @return CycleDependency list
* @throws Exception if error throws Exception
*/
public List<CycleDependency> getCycleDependencies(int masterId,int[] ids,Date scheduledFireTime) throws Exception {
List<CycleDependency> cycleDependencyList = new ArrayList<CycleDependency>();
if(ArrayUtils.isEmpty(ids)){
logger.warn("ids[] is empty!is invalid!");
return cycleDependencyList;
}
if(scheduledFireTime == null){
logger.warn("scheduledFireTime is null!is invalid!");
return cycleDependencyList;
}
String strCrontab = "";
CronExpression depCronExpression;
Cron depCron;
List<Date> list;
List<Schedule> schedules = this.selectAllByProcessDefineId(ids);
// for all scheduling information
for(Schedule depSchedule:schedules){
strCrontab = depSchedule.getCrontab();
depCronExpression = CronUtils.parse2CronExpression(strCrontab);
depCron = CronUtils.parse2Cron(strCrontab);
CycleEnum cycleEnum = CronUtils.getMiniCycle(depCron);
if(cycleEnum == null){
logger.error("{} is not valid",strCrontab);
continue;
}
Calendar calendar = Calendar.getInstance();
switch (cycleEnum){
/*case MINUTE:
calendar.add(Calendar.MINUTE,-61);*/
case HOUR:
calendar.add(Calendar.HOUR,-25);
break;
case DAY:
calendar.add(Calendar.DATE,-32);
break;
case WEEK:
calendar.add(Calendar.DATE,-32);
break;
case MONTH:
calendar.add(Calendar.MONTH,-13);
break;
default:
logger.warn("Dependent process definition's cycleEnum is {},not support!!", cycleEnum.name());
continue;
}
Date start = calendar.getTime();
if(depSchedule.getProcessDefinitionId() == masterId){
list = CronUtils.getSelfFireDateList(start, scheduledFireTime, depCronExpression);
}else {
list = CronUtils.getFireDateList(start, scheduledFireTime, depCronExpression);
}
if(list.size()>=1){
start = list.get(list.size()-1);
CycleDependency dependency = new CycleDependency(depSchedule.getProcessDefinitionId(),start, CronUtils.getExpirationTime(start, cycleEnum), cycleEnum);
cycleDependencyList.add(dependency);
}
}
return cycleDependencyList;
}
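// Illustrative note: the lookback window depends on the dependent schedule's cycle
// (e.g. 25 hours for HOUR, 32 days for DAY/WEEK, 13 months for MONTH); the latest fire date
// inside that window, if any, becomes the CycleDependency start time.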
/**
* find last scheduler process instance in the date interval
* @param definitionId definitionId
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastSchedulerProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastSchedulerProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last manual process instance interval
* @param definitionId process definition id
* @param dateInterval dateInterval
* @return process instance
*/
public ProcessInstance findLastManualProcessInterval(int definitionId, DateInterval dateInterval) {
return processInstanceMapper.queryLastManualProcess(definitionId,
dateInterval.getStartTime(),
dateInterval.getEndTime());
}
/**
* find last running process instance
* @param definitionId process definition id
* @param startTime start time
* @param endTime end time
* @return process instance
*/
public ProcessInstance findLastRunningProcess(int definitionId, Date startTime, Date endTime) {
return processInstanceMapper.queryLastRunningProcess(definitionId,
startTime,
endTime,
stateArray);
}
/**
* query user queue by process instance id
* @param processInstanceId processInstanceId
* @return queue
*/
public String queryUserQueueByProcessInstanceId(int processInstanceId){
String queue = "";
ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId);
if(processInstance == null){
return queue;
}
User executor = userMapper.selectById(processInstance.getExecutorId());
if(executor != null){
queue = executor.getQueue();
}
return queue;
}
/**
* get task worker group
* @param taskInstance taskInstance
* @return workerGroupId
*/
public String getTaskWorkerGroup(TaskInstance taskInstance) {
String workerGroup = taskInstance.getWorkerGroup();
if(StringUtils.isNotBlank(workerGroup)){
return workerGroup;
}
int processInstanceId = taskInstance.getProcessInstanceId();
ProcessInstance processInstance = findProcessInstanceById(processInstanceId);
if(processInstance != null){
return processInstance.getWorkerGroup();
}
logger.info("task : {} will use default worker group", taskInstance.getId());
return Constants.DEFAULT_WORKER_GROUP;
}
/**
* get have perm project list
* @param userId userId
* @return project list
*/
public List<Project> getProjectListHavePerm(int userId){
List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId);
List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId);
if(createProjects == null){
createProjects = new ArrayList<>();
}
if(authedProjects != null){
createProjects.addAll(authedProjects);
}
return createProjects;
}
/**
* get have perm project ids
* @param userId userId
* @return project ids
*/
public List<Integer> getProjectIdListHavePerm(int userId){
List<Integer> projectIdList = new ArrayList<>();
for(Project project : getProjectListHavePerm(userId)){
projectIdList.add(project.getId());
}
return projectIdList;
}
/**
* list unauthorized resources/datasources/udf functions for the given authorization type
* @param userId user id
* @param needChecks the ids or names to check
* @param authorizationType authorization type
* @return the subset of needChecks that the user is not authorized for
*/
public <T> List<T> listUnauthorized(int userId,T[] needChecks,AuthorizationType authorizationType){
List<T> resultList = new ArrayList<T>();
if (!ArrayUtils.isEmpty(needChecks)) {
Set<T> originResSet = new HashSet<T>(Arrays.asList(needChecks));
switch (authorizationType){
case RESOURCE_FILE_ID:
Set<Integer> authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedResourceFiles);
break;
case RESOURCE_FILE_NAME:
Set<String> authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
case UDF_FILE:
Set<Integer> authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfFiles);
break;
case DATASOURCE:
Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId,needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
break;
case UDF:
Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedUdfs);
break;
}
resultList.addAll(originResSet);
}
return resultList;
}
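// Illustrative usage (assumed ids): listUnauthorized(userId, new Integer[]{1, 2, 3}, AuthorizationType.DATASOURCE)
// returns the subset of {1, 2, 3} the user is NOT authorized for, since authorized ids are removed
// from the original set before it is copied into the result list.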
/**
* get user by user id
* @param userId user id
* @return User
*/
public User getUserById(int userId){
return userMapper.queryDetailsById(userId);
}
/**
* get resource by resource id
* @param resourceId resource id
* @return Resource
*/
public Resource getResourceById(int resourceId){
return resourceMapper.selectById(resourceId);
}
/**
* list resources by ids
* @param resIds resIds
* @return resource list
*/
public List<Resource> listResourceByIds(Integer[] resIds){
return resourceMapper.listResourceByIds(resIds);
}
/**
* format the task app id for a task instance
* @param taskInstance task instance
* @return app id formatted as processDefinitionId_processInstanceId_taskInstanceId, or "" if the definition or instance is not found
*/
public String formatTaskAppId(TaskInstance taskInstance){
ProcessDefinition definition = this.findProcessDefineById(taskInstance.getProcessDefinitionId());
ProcessInstance processInstanceById = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
if(definition == null || processInstanceById == null){
return "";
}
return String.format("%s_%s_%s",
definition.getId(),
processInstanceById.getId(),
taskInstance.getId());
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,598 | [BUG] After renaming the file, downloading the file gives an error | 1. Create the file test_1.sh
2. Rename the file to test_1.txt; the file name displayed on the page is "test_1.txt.sh"
3. Download the file; the page prompts "error downloading resource file"
4. Check HDFS: the resource file stored there is "test_1.txt"
![image](https://user-images.githubusercontent.com/55787491/81133105-3d4a7900-8f83-11ea-9ce6-d36fa2f38a43.png)
![image](https://user-images.githubusercontent.com/55787491/81132928-a7aee980-8f82-11ea-8b0a-e3ed7231e560.png)
![image](https://user-images.githubusercontent.com/55787491/81133119-48050e00-8f83-11ea-84a5-83bc3f9f84d3.png)
**Which version of Dolphin Scheduler:**
-[dev]
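
A minimal sketch (an illustrative assumption, not taken from the DolphinScheduler source) of the kind of suffix handling that would explain the symptom, where the displayed name gets the old suffix appended while HDFS keeps only the new name:

```java
// Hypothetical illustration of the observed behavior -- names and logic are assumptions.
String originalName = "test_1.sh";
String newName = "test_1.txt";
String suffix = originalName.substring(originalName.lastIndexOf('.')); // ".sh"
String displayedName = newName + suffix; // "test_1.txt.sh" -> what the page shows
String hdfsName = newName;               // "test_1.txt"    -> what actually exists in HDFS
// Downloading by displayedName then fails because "test_1.txt.sh" does not exist in HDFS.
```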
| https://github.com/apache/dolphinscheduler/issues/2598 | https://github.com/apache/dolphinscheduler/pull/2732 | 63ce143be5fd76ea08f9fcf73e7720cb1be99297 | 60f8eb25fa69fa39cd17404b9eb66376c174b9c3 | "2020-05-06T02:17:35Z" | java | "2020-05-18T09:56:41Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.enums;
import org.springframework.context.i18n.LocaleContextHolder;
import java.util.Locale;
/**
* status enum
*/
public enum Status {
SUCCESS(0, "success", "成功"),
INTERNAL_SERVER_ERROR_ARGS(10000, "Internal Server Error: {0}", "服务端异常: {0}"),
REQUEST_PARAMS_NOT_VALID_ERROR(10001, "request parameter {0} is not valid", "请求参数[{0}]无效"),
TASK_TIMEOUT_PARAMS_ERROR(10002, "task timeout parameter is not valid", "任务超时参数无效"),
USER_NAME_EXIST(10003, "user name already exists", "用户名已存在"),
USER_NAME_NULL(10004,"user name is null", "用户名不能为空"),
HDFS_OPERATION_ERROR(10006, "hdfs operation error", "hdfs操作错误"),
TASK_INSTANCE_NOT_FOUND(10008, "task instance not found", "任务实例不存在"),
TENANT_NAME_EXIST(10009, "tenant code already exists", "租户编码已存在"),
USER_NOT_EXIST(10010, "user {0} not exists", "用户[{0}]不存在"),
ALERT_GROUP_NOT_EXIST(10011, "alarm group not found", "告警组不存在"),
ALERT_GROUP_EXIST(10012, "alarm group already exists", "告警组名称已存在"),
USER_NAME_PASSWD_ERROR(10013,"user name or password error", "用户名或密码错误"),
LOGIN_SESSION_FAILED(10014,"create session failed!", "创建session失败"),
DATASOURCE_EXIST(10015, "data source name already exists", "数据源名称已存在"),
DATASOURCE_CONNECT_FAILED(10016, "data source connection failed", "建立数据源连接失败"),
TENANT_NOT_EXIST(10017, "tenant not exists", "租户不存在"),
PROJECT_NOT_FOUNT(10018, "project {0} not found ", "项目[{0}]不存在"),
PROJECT_ALREADY_EXISTS(10019, "project {0} already exists", "项目名称[{0}]已存在"),
TASK_INSTANCE_NOT_EXISTS(10020, "task instance {0} does not exist", "任务实例[{0}]不存在"),
TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE(10021, "task instance {0} is not sub process instance", "任务实例[{0}]不是子流程实例"),
SCHEDULE_CRON_NOT_EXISTS(10022, "scheduler crontab {0} does not exist", "调度配置定时表达式[{0}]不存在"),
SCHEDULE_CRON_ONLINE_FORBID_UPDATE(10023, "online status does not allow update operations", "调度配置上线状态不允许修改"),
SCHEDULE_CRON_CHECK_FAILED(10024, "scheduler crontab expression validation failure: {0}", "调度配置定时表达式验证失败: {0}"),
MASTER_NOT_EXISTS(10025, "master does not exist", "无可用master节点"),
SCHEDULE_STATUS_UNKNOWN(10026, "unknown status: {0}", "未知状态: {0}"),
CREATE_ALERT_GROUP_ERROR(10027,"create alert group error", "创建告警组错误"),
QUERY_ALL_ALERTGROUP_ERROR(10028,"query all alertgroup error", "查询告警组错误"),
LIST_PAGING_ALERT_GROUP_ERROR(10029,"list paging alert group error", "分页查询告警组错误"),
UPDATE_ALERT_GROUP_ERROR(10030,"update alert group error", "更新告警组错误"),
DELETE_ALERT_GROUP_ERROR(10031,"delete alert group error", "删除告警组错误"),
ALERT_GROUP_GRANT_USER_ERROR(10032,"alert group grant user error", "告警组授权用户错误"),
CREATE_DATASOURCE_ERROR(10033,"create datasource error", "创建数据源错误"),
UPDATE_DATASOURCE_ERROR(10034,"update datasource error", "更新数据源错误"),
QUERY_DATASOURCE_ERROR(10035,"query datasource error", "查询数据源错误"),
CONNECT_DATASOURCE_FAILURE(10036,"connect datasource failure", "建立数据源连接失败"),
CONNECTION_TEST_FAILURE(10037,"connection test failure", "测试数据源连接失败"),
DELETE_DATA_SOURCE_FAILURE(10038,"delete data source failure", "删除数据源失败"),
VERIFY_DATASOURCE_NAME_FAILURE(10039,"verify datasource name failure", "验证数据源名称失败"),
UNAUTHORIZED_DATASOURCE(10040,"unauthorized datasource", "未经授权的数据源"),
AUTHORIZED_DATA_SOURCE(10041,"authorized data source", "授权数据源失败"),
LOGIN_SUCCESS(10042,"login success", "登录成功"),
USER_LOGIN_FAILURE(10043,"user login failure", "用户登录失败"),
LIST_WORKERS_ERROR(10044,"list workers error", "查询worker列表错误"),
LIST_MASTERS_ERROR(10045,"list masters error", "查询master列表错误"),
UPDATE_PROJECT_ERROR(10046,"update project error", "更新项目信息错误"),
QUERY_PROJECT_DETAILS_BY_ID_ERROR(10047,"query project details by id error", "查询项目详细信息错误"),
CREATE_PROJECT_ERROR(10048,"create project error", "创建项目错误"),
LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR(10049,"login user query project list paging error", "分页查询项目列表错误"),
DELETE_PROJECT_ERROR(10050,"delete project error", "删除项目错误"),
QUERY_UNAUTHORIZED_PROJECT_ERROR(10051,"query unauthorized project error", "查询未授权项目错误"),
QUERY_AUTHORIZED_PROJECT(10052,"query authorized project", "查询授权项目错误"),
QUERY_QUEUE_LIST_ERROR(10053,"query queue list error", "查询队列列表错误"),
CREATE_RESOURCE_ERROR(10054,"create resource error", "创建资源错误"),
UPDATE_RESOURCE_ERROR(10055,"update resource error", "更新资源错误"),
QUERY_RESOURCES_LIST_ERROR(10056,"query resources list error", "查询资源列表错误"),
QUERY_RESOURCES_LIST_PAGING(10057,"query resources list paging", "分页查询资源列表错误"),
DELETE_RESOURCE_ERROR(10058,"delete resource error", "删除资源错误"),
VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR(10059,"verify resource by name and type error", "资源名称或类型验证错误"),
VIEW_RESOURCE_FILE_ON_LINE_ERROR(10060,"view resource file online error", "查看资源文件错误"),
CREATE_RESOURCE_FILE_ON_LINE_ERROR(10061,"create resource file online error", "创建资源文件错误"),
RESOURCE_FILE_IS_EMPTY(10062,"resource file is empty", "资源文件内容不能为空"),
EDIT_RESOURCE_FILE_ON_LINE_ERROR(10063,"edit resource file online error", "更新资源文件错误"),
DOWNLOAD_RESOURCE_FILE_ERROR(10064,"download resource file error", "下载资源文件错误"),
CREATE_UDF_FUNCTION_ERROR(10065 ,"create udf function error", "创建UDF函数错误"),
VIEW_UDF_FUNCTION_ERROR( 10066,"view udf function error", "查询UDF函数错误"),
UPDATE_UDF_FUNCTION_ERROR(10067,"update udf function error", "更新UDF函数错误"),
QUERY_UDF_FUNCTION_LIST_PAGING_ERROR( 10068,"query udf function list paging error", "分页查询UDF函数列表错误"),
QUERY_DATASOURCE_BY_TYPE_ERROR( 10069,"query datasource by type error", "查询数据源信息错误"),
VERIFY_UDF_FUNCTION_NAME_ERROR( 10070,"verify udf function name error", "UDF函数名称验证错误"),
DELETE_UDF_FUNCTION_ERROR( 10071,"delete udf function error", "删除UDF函数错误"),
AUTHORIZED_FILE_RESOURCE_ERROR( 10072,"authorized file resource error", "授权资源文件错误"),
AUTHORIZE_RESOURCE_TREE( 10073,"authorize resource tree display error","授权资源目录树错误"),
UNAUTHORIZED_UDF_FUNCTION_ERROR( 10074,"unauthorized udf function error", "查询未授权UDF函数错误"),
AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error", "授权UDF函数错误"),
CREATE_SCHEDULE_ERROR(10076,"create schedule error", "创建调度配置错误"),
UPDATE_SCHEDULE_ERROR(10077,"update schedule error", "更新调度配置错误"),
PUBLISH_SCHEDULE_ONLINE_ERROR(10078,"publish schedule online error", "上线调度配置错误"),
OFFLINE_SCHEDULE_ERROR(10079,"offline schedule error", "下线调度配置错误"),
QUERY_SCHEDULE_LIST_PAGING_ERROR(10080,"query schedule list paging error", "分页查询调度配置列表错误"),
QUERY_SCHEDULE_LIST_ERROR(10081,"query schedule list error", "查询调度配置列表错误"),
QUERY_TASK_LIST_PAGING_ERROR(10082,"query task list paging error", "分页查询任务列表错误"),
QUERY_TASK_RECORD_LIST_PAGING_ERROR(10083,"query task record list paging error", "分页查询任务记录错误"),
CREATE_TENANT_ERROR(10084,"create tenant error", "创建租户错误"),
QUERY_TENANT_LIST_PAGING_ERROR(10085,"query tenant list paging error", "分页查询租户列表错误"),
QUERY_TENANT_LIST_ERROR(10086,"query tenant list error", "查询租户列表错误"),
UPDATE_TENANT_ERROR(10087,"update tenant error", "更新租户错误"),
DELETE_TENANT_BY_ID_ERROR(10088,"delete tenant by id error", "删除租户错误"),
VERIFY_TENANT_CODE_ERROR(10089,"verify tenant code error", "租户编码验证错误"),
CREATE_USER_ERROR(10090,"create user error", "创建用户错误"),
QUERY_USER_LIST_PAGING_ERROR(10091,"query user list paging error", "分页查询用户列表错误"),
UPDATE_USER_ERROR(10092,"update user error", "更新用户错误"),
DELETE_USER_BY_ID_ERROR(10093,"delete user by id error", "删除用户错误"),
GRANT_PROJECT_ERROR(10094,"grant project error", "授权项目错误"),
GRANT_RESOURCE_ERROR(10095,"grant resource error", "授权资源错误"),
GRANT_UDF_FUNCTION_ERROR(10096,"grant udf function error", "授权UDF函数错误"),
GRANT_DATASOURCE_ERROR(10097,"grant datasource error", "授权数据源错误"),
GET_USER_INFO_ERROR(10098,"get user info error", "获取用户信息错误"),
USER_LIST_ERROR(10099,"user list error", "查询用户列表错误"),
VERIFY_USERNAME_ERROR(10100,"verify username error", "用户名验证错误"),
UNAUTHORIZED_USER_ERROR(10101,"unauthorized user error", "查询未授权用户错误"),
AUTHORIZED_USER_ERROR(10102,"authorized user error", "查询授权用户错误"),
QUERY_TASK_INSTANCE_LOG_ERROR(10103,"view task instance log error", "查询任务实例日志错误"),
DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104,"download task instance log file error", "下载任务日志文件错误"),
CREATE_PROCESS_DEFINITION(10105,"create process definition", "创建工作流错误"),
VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106,"verify process definition name unique error", "工作流名称已存在"),
UPDATE_PROCESS_DEFINITION_ERROR(10107,"update process definition error", "更新工作流定义错误"),
RELEASE_PROCESS_DEFINITION_ERROR(10108,"release process definition error", "上线工作流错误"),
QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR(10109,"query datail of process definition error", "查询工作流详细信息错误"),
QUERY_PROCESS_DEFINITION_LIST(10110,"query process definition list", "查询工作流列表错误"),
ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR(10111,"encapsulation treeview structure error", "查询工作流树形图数据错误"),
GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR(10112,"get tasks list by process definition id error", "查询工作流定义节点信息错误"),
QUERY_PROCESS_INSTANCE_LIST_PAGING_ERROR(10113,"query process instance list paging error", "分页查询工作流实例列表错误"),
QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_ERROR(10114,"query task list by process instance id error", "查询任务实例列表错误"),
UPDATE_PROCESS_INSTANCE_ERROR(10115,"update process instance error", "更新工作流实例错误"),
QUERY_PROCESS_INSTANCE_BY_ID_ERROR(10116,"query process instance by id error", "查询工作流实例错误"),
DELETE_PROCESS_INSTANCE_BY_ID_ERROR(10117,"delete process instance by id error", "删除工作流实例错误"),
QUERY_SUB_PROCESS_INSTANCE_DETAIL_INFO_BY_TASK_ID_ERROR(10118,"query sub process instance detail info by task id error", "查询子流程任务实例错误"),
QUERY_PARENT_PROCESS_INSTANCE_DETAIL_INFO_BY_SUB_PROCESS_INSTANCE_ID_ERROR(10119,"query parent process instance detail info by sub process instance id error", "查询子流程该工作流实例错误"),
QUERY_PROCESS_INSTANCE_ALL_VARIABLES_ERROR(10120,"query process instance all variables error", "查询工作流自定义变量信息错误"),
ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR(10121,"encapsulation process instance gantt structure error", "查询工作流实例甘特图数据错误"),
QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR(10122,"query process definition list paging error", "分页查询工作流定义列表错误"),
SIGN_OUT_ERROR(10123,"sign out error", "退出错误"),
TENANT_CODE_HAS_ALREADY_EXISTS(10124,"tenant code has already exists", "租户编码已存在"),
IP_IS_EMPTY(10125,"ip is empty", "IP地址不能为空"),
SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE(10126, "schedule release is already {0}", "调度配置上线错误[{0}]"),
CREATE_QUEUE_ERROR(10127, "create queue error", "创建队列错误"),
QUEUE_NOT_EXIST(10128, "queue {0} not exists", "队列ID[{0}]不存在"),
QUEUE_VALUE_EXIST(10129, "queue value {0} already exists", "队列值[{0}]已存在"),
QUEUE_NAME_EXIST(10130, "queue name {0} already exists", "队列名称[{0}]已存在"),
UPDATE_QUEUE_ERROR(10131, "update queue error", "更新队列信息错误"),
NEED_NOT_UPDATE_QUEUE(10132, "no content changes, no updates are required", "数据未变更,不需要更新队列信息"),
VERIFY_QUEUE_ERROR(10133,"verify queue error", "验证队列信息错误"),
NAME_NULL(10134,"name must be not null", "名称不能为空"),
NAME_EXIST(10135, "name {0} already exists", "名称[{0}]已存在"),
SAVE_ERROR(10136, "save error", "保存错误"),
DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!", "请先删除全部工作流定义"),
BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10138,"batch delete process instance by ids {0} error", "批量删除工作流实例错误"),
PREVIEW_SCHEDULE_ERROR(10139,"preview schedule error", "预览调度配置错误"),
PARSE_TO_CRON_EXPRESSION_ERROR(10140,"parse cron to cron expression error", "解析调度表达式错误"),
SCHEDULE_START_TIME_END_TIME_SAME(10141,"The start time must not be the same as the end", "开始时间不能和结束时间一样"),
DELETE_TENANT_BY_ID_FAIL(10142,"delete tenant by id fail, for there are {0} process instances in executing using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"),
DELETE_TENANT_BY_ID_FAIL_DEFINES(10143,"delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"),
DELETE_TENANT_BY_ID_FAIL_USERS(10144,"delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"),
DELETE_WORKER_GROUP_BY_ID_FAIL(10145,"delete worker group by id fail, for there are {0} process instances in executing using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"),
QUERY_WORKER_GROUP_FAIL(10146,"query worker group fail ", "查询worker分组失败"),
DELETE_WORKER_GROUP_FAIL(10147,"delete worker group fail ", "删除worker分组失败"),
COPY_PROCESS_DEFINITION_ERROR(10148,"copy process definition error", "复制工作流错误"),
UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"),
UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"),
RESOURCE_NOT_EXIST(20004, "resource not exist", "资源不存在"),
RESOURCE_EXIST(20005, "resource already exists", "资源已存在"),
RESOURCE_SUFFIX_NOT_SUPPORT_VIEW(20006, "resource suffix do not support online viewing", "资源文件后缀不支持查看"),
RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"),
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"),
HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource not exist or no permission,please view the task node and remove error resource","请检查任务节点并移除无权限或者已删除的资源"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} is not has project {1} permission", "当前用户[{0}]没有[{1}]项目的操作权限"),
PROCESS_INSTANCE_NOT_EXIST(50001, "process instance {0} does not exist", "工作流实例[{0}]不存在"),
PROCESS_INSTANCE_EXIST(50002, "process instance {0} already exists", "工作流实例[{0}]已存在"),
PROCESS_DEFINE_NOT_EXIST(50003, "process definition {0} does not exist", "工作流定义[{0}]不存在"),
PROCESS_DEFINE_NOT_RELEASE(50004, "process definition {0} not on line", "工作流定义[{0}]不是上线状态"),
PROCESS_INSTANCE_ALREADY_CHANGED(50005, "the status of process instance {0} is already {1}", "工作流实例[{0}]的状态已经是[{1}]"),
PROCESS_INSTANCE_STATE_OPERATION_ERROR(50006, "the status of process instance {0} is {1},Cannot perform {2} operation", "工作流实例[{0}]的状态是[{1}],无法执行[{2}]操作"),
SUB_PROCESS_INSTANCE_NOT_EXIST(50007, "the task belong to process instance does not exist", "子工作流实例不存在"),
PROCESS_DEFINE_NOT_ALLOWED_EDIT(50008, "process definition {0} does not allow edit", "工作流定义[{0}]不允许修改"),
PROCESS_INSTANCE_EXECUTING_COMMAND(50009, "process instance {0} is executing the command, please wait ...", "工作流实例[{0}]正在执行命令,请稍等..."),
PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE(50010, "process instance {0} is not sub process instance", "工作流实例[{0}]不是子工作流实例"),
TASK_INSTANCE_STATE_COUNT_ERROR(50011,"task instance state count error", "查询各状态任务实例数错误"),
COUNT_PROCESS_INSTANCE_STATE_ERROR(50012,"count process instance state error", "查询各状态流程实例数错误"),
COUNT_PROCESS_DEFINITION_USER_ERROR(50013,"count process definition user error", "查询各用户流程定义数错误"),
START_PROCESS_INSTANCE_ERROR(50014,"start process instance error", "运行工作流实例错误"),
EXECUTE_PROCESS_INSTANCE_ERROR(50015,"execute process instance error", "操作工作流实例错误"),
CHECK_PROCESS_DEFINITION_ERROR(50016,"check process definition error", "检查工作流实例错误"),
QUERY_RECIPIENTS_AND_COPYERS_BY_PROCESS_DEFINITION_ERROR(50017,"query recipients and copyers by process definition error", "查询收件人和抄送人错误"),
DATA_IS_NOT_VALID(50017,"data {0} not valid", "数据[{0}]无效"),
DATA_IS_NULL(50018,"data {0} is null", "数据[{0}]不能为空"),
PROCESS_NODE_HAS_CYCLE(50019,"process node has cycle", "流程节点间存在循环依赖"),
PROCESS_NODE_S_PARAMETER_INVALID(50020,"process node %s parameter invalid", "流程节点[%s]参数无效"),
PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already on line", "工作流定义[{0}]已上线"),
DELETE_PROCESS_DEFINE_BY_ID_ERROR(50022,"delete process definition by id error", "删除工作流定义错误"),
SCHEDULE_CRON_STATE_ONLINE(50023,"the status of schedule {0} is already on line", "调度配置[{0}]已上线"),
DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024,"delete schedule by id error", "删除调度配置错误"),
BATCH_DELETE_PROCESS_DEFINE_ERROR(50025,"batch delete process definition error", "批量删除工作流定义错误"),
BATCH_DELETE_PROCESS_DEFINE_BY_IDS_ERROR(50026,"batch delete process definition by ids {0} error", "批量删除工作流定义[{0}]错误"),
TENANT_NOT_SUITABLE(50027,"there is not any tenant suitable, please choose a tenant available.", "没有合适的租户,请选择可用的租户"),
EXPORT_PROCESS_DEFINE_BY_ID_ERROR(50028,"export process definition by id error", "导出工作流定义错误"),
BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR(50028,"batch export process definition by ids error", "批量导出工作流定义错误"),
IMPORT_PROCESS_DEFINE_ERROR(50029,"import process definition error", "导入工作流定义错误"),
HDFS_NOT_STARTUP(60001,"hdfs not startup", "hdfs未启用"),
/**
* for monitor
*/
QUERY_DATABASE_STATE_ERROR(70001,"query database state error", "查询数据库状态错误"),
QUERY_ZOOKEEPER_STATE_ERROR(70002,"query zookeeper state error", "查询zookeeper状态错误"),
CREATE_ACCESS_TOKEN_ERROR(70010,"create access token error", "创建访问token错误"),
GENERATE_TOKEN_ERROR(70011,"generate token error", "生成token错误"),
QUERY_ACCESSTOKEN_LIST_PAGING_ERROR(70012,"query access token list paging error", "分页查询访问token列表错误"),
UPDATE_ACCESS_TOKEN_ERROR(70013,"update access token error", "更新访问token错误"),
DELETE_ACCESS_TOKEN_ERROR(70014,"delete access token error", "删除访问token错误"),
ACCESS_TOKEN_NOT_EXIST(70015, "access token not exist", "访问token不存在"),
COMMAND_STATE_COUNT_ERROR(80001,"task instance state count error", "查询各状态任务实例数错误"),
QUEUE_COUNT_ERROR(90001,"queue count error", "查询队列数据错误"),
KERBEROS_STARTUP_STATE(100001,"get kerberos startup state error", "获取kerberos启动状态错误"),
;
private final int code;
private final String enMsg;
private final String zhMsg;
private Status(int code, String enMsg, String zhMsg) {
this.code = code;
this.enMsg = enMsg;
this.zhMsg = zhMsg;
}
public int getCode() {
return this.code;
}
public String getMsg() {
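        // return the Chinese message when the request locale is Simplified Chinese, otherwise the English message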
if (Locale.SIMPLIFIED_CHINESE.getLanguage().equals(LocaleContextHolder.getLocale().getLanguage())) {
return this.zhMsg;
} else {
return this.enMsg;
}
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,598 | [BUG] After renaming the file, downloading the file gives an error | 1. create file is test_1.sh
2. The file is renamed to test_1.txt, and the file name displayed on the page is "test_1.txt.sh"
3. Download the file; it prompts "error downloading resource file".
4. Check HDFS: the resource file there is actually named "test_1.txt".
![image](https://user-images.githubusercontent.com/55787491/81133105-3d4a7900-8f83-11ea-9ce6-d36fa2f38a43.png)
![image](https://user-images.githubusercontent.com/55787491/81132928-a7aee980-8f82-11ea-8b0a-e3ed7231e560.png)
![image](https://user-images.githubusercontent.com/55787491/81133119-48050e00-8f83-11ea-84a5-83bc3f9f84d3.png)
**Which version of Dolphin Scheduler:**
-[dev]
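
Below is a minimal, self-contained sketch (class and variable names are illustrative only, not the project's API) of the kind of mismatch these steps point to: if a rename only updates the alias shown on the page while the file kept in HDFS has a different name, a download path built from the alias points at a file that does not exist.

```java
import java.nio.file.Path;
import java.nio.file.Paths;

public class RenameMismatchSketch {
    public static void main(String[] args) {
        String hdfsFileName = "test_1.txt";       // name the file actually has in HDFS after the rename
        String displayedAlias = "test_1.txt.sh";  // alias shown on the page
        Path resourceDir = Paths.get("/tmp/resources");

        Path actualPath = resourceDir.resolve(hdfsFileName);
        Path downloadPath = resourceDir.resolve(displayedAlias); // path derived from the alias

        // The two paths differ, so a download that resolves the alias-based path fails.
        System.out.println("actual file : " + actualPath);
        System.out.println("download via: " + downloadPath);
        System.out.println("same path   : " + actualPath.equals(downloadPath)); // false
    }
}
```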
| https://github.com/apache/dolphinscheduler/issues/2598 | https://github.com/apache/dolphinscheduler/pull/2732 | 63ce143be5fd76ea08f9fcf73e7720cb1be99297 | 60f8eb25fa69fa39cd17404b9eb66376c174b9c3 | "2020-05-06T02:17:35Z" | java | "2020-05-18T09:56:41Z" | dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.exceptions.ServiceException;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.text.MessageFormat;
import java.util.*;
import java.util.regex.Matcher;
import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* resources service
*/
@Service
public class ResourcesService extends BaseService {
private static final Logger logger = LoggerFactory.getLogger(ResourcesService.class);
@Autowired
private ResourceMapper resourcesMapper;
@Autowired
private UdfFuncMapper udfFunctionMapper;
@Autowired
private TenantMapper tenantMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private ResourceUserMapper resourceUserMapper;
@Autowired
private ProcessDefinitionMapper processDefinitionMapper;
/**
* create directory
*
* @param loginUser login user
* @param name alias
* @param description description
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create directory result
*/
@Transactional(rollbackFor = Exception.class)
public Result createDirectory(User loginUser,
String name,
String description,
ResourceType type,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource directory {} has exist, can't recreate", fullName);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<String, Object>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
//create directory in hdfs
createDirecotry(loginUser,fullName,type,result);
return result;
}
/**
* create resource
*
* @param loginUser login user
* @param name alias
* @param desc description
* @param file file
* @param type type
* @param pid parent id
* @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result createResource(User loginUser,
String name,
String desc,
ResourceType type,
MultipartFile file,
int pid,
String currentDir) {
Result result = new Result();
// if hdfs not startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
if (pid != -1) {
Resource parentResource = resourcesMapper.selectById(pid);
if (parentResource == null) {
putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, parentResource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
}
// file is empty
if (file.isEmpty()) {
logger.error("file is empty: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_FILE_IS_EMPTY);
return result;
}
// file suffix
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(name);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
/**
* rename file suffix and original suffix must be consistent
*/
logger.error("rename file suffix and original suffix must be consistent: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE);
return result;
}
//If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar
if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) {
logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg());
putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR);
return result;
}
if (file.getSize() > Constants.MAX_FILE_SIZE) {
logger.error("file size is too large: {}", file.getOriginalFilename());
putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT);
return result;
}
        // check whether the resource name already exists
String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error("resource already exists, can't recreate ", e);
throw new RuntimeException("resource already exists, can't recreate");
}
// fail upload
if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
}
return result;
}
/**
* check resource is exists
*
* @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
private boolean checkResourceExists(String fullName, int userId, int type ){
List<Resource> resources = resourcesMapper.queryResourceList(fullName, userId, type);
if (resources != null && resources.size() > 0) {
return true;
}
return false;
}
/**
* update resource
* @param loginUser login user
* @param resourceId resource id
* @param name name
* @param desc description
* @param type resource type
* @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
int resourceId,
String name,
String desc,
ResourceType type) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
if (name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) {
putMsg(result, Status.SUCCESS);
return result;
}
        // check whether the resource already exists
String originFullName = resource.getFullName();
String originResourceName = resource.getAlias();
String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
}
// query tenant by user id
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// verify whether the resource exists in storage
// get the path of origin file in storage
String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
try {
if (!HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.error("{} not exist", originHdfsFileName);
putMsg(result,Status.RESOURCE_NOT_EXIST);
return result;
}
} catch (IOException e) {
logger.error(e.getMessage(),e);
throw new ServiceException(Status.HDFS_OPERATION_ERROR);
}
String nameWithSuffix = name;
if (!resource.isDirectory()) {
//get the file suffix
String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
            // if the new name has no suffix, append the original suffix; otherwise keep the name as-is
if(!name.endsWith(suffix)){
nameWithSuffix = nameWithSuffix + suffix;
}
}
// updateResource data
List<Integer> childrenResource = listAllChildren(resource,false);
String oldFullName = resource.getFullName();
Date now = new Date();
resource.setAlias(nameWithSuffix);
resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
try {
resourcesMapper.updateById(resource);
if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
String matcherFullName = Matcher.quoteReplacement(fullName);
List<Resource> childResourceList = new ArrayList<>();
List<Resource> resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
childResourceList = resourceList.stream().map(t -> {
t.setFullName(t.getFullName().replaceFirst(oldFullName, matcherFullName));
t.setUpdateTime(now);
return t;
}).collect(Collectors.toList());
resourcesMapper.batchUpdateResource(childResourceList);
}
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>(5);
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
} catch (Exception e) {
logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e);
throw new ServiceException(Status.UPDATE_RESOURCE_ERROR);
}
// if name unchanged, return directly without moving on HDFS
if (originResourceName.equals(name)) {
return result;
}
// get the path of dest file in hdfs
String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
try {
logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true);
} catch (Exception e) {
logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e);
putMsg(result,Status.HDFS_COPY_FAIL);
throw new ServiceException(Status.HDFS_COPY_FAIL);
}
return result;
}
/**
* query resources list paging
*
* @param loginUser login user
* @param type resource type
* @param searchVal search value
* @param pageNo page number
* @param pageSize page size
* @return resource list page
*/
public Map<String, Object> queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap<String, Object> result = new HashMap<>(5);
Page<Resource> page = new Page(pageNo, pageSize);
int userId = loginUser.getId();
if (isAdmin(loginUser)) {
userId= 0;
}
if (direcotryId != -1) {
Resource directory = resourcesMapper.selectById(direcotryId);
if (directory == null) {
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
}
IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page,
userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo<Resource>(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result,Status.SUCCESS);
return result;
}
/**
     * create directory
* @param loginUser login user
* @param fullName full name
* @param type resource type
* @param result Result
*/
private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
createTenantDirIfNotExists(tenantCode);
}
if (!HadoopUtils.getInstance().mkdir(directoryName)) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
} catch (Exception e) {
logger.error("create resource directory {} of hdfs failed",directoryName);
putMsg(result,Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
}
}
/**
* upload file to hdfs
*
* @param loginUser login user
* @param fullName full name
* @param file file
*/
private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
return false;
}
// query tenant
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
// random file name
String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
// save file to hdfs, and delete original file
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
createTenantDirIfNotExists(tenantCode);
}
org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename);
HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
return false;
}
return true;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList);
//JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* query resource list
*
* @param loginUser login user
* @param type resource type
* @return resource list
*/
public Map<String, Object> queryResourceJarList(User loginUser, ResourceType type) {
Map<String, Object> result = new HashMap<>(5);
int userId = loginUser.getId();
if(isAdmin(loginUser)){
userId = 0;
}
List<Resource> allResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal(),0);
List<Resource> resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* delete resource
*
* @param loginUser login user
* @param resourceId resource id
* @return delete result code
* @throws Exception exception
*/
@Transactional(rollbackFor = Exception.class)
public Result delete(User loginUser, int resourceId) throws Exception {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//get resource and hdfs path
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
if (!hasPerm(loginUser, resource.getUserId())) {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
        // get all resource ids of process definitions that are released
List<Map<String, Object>> list = processDefinitionMapper.listResources();
Map<Integer, Set<Integer>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list);
Set<Integer> resourceIdSet = resourceProcessMap.keySet();
// get all children of the resource
List<Integer> allChildren = listAllChildren(resource,true);
Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]);
        // if the resource type is UDF, check whether it is bound by any UDF function
if (resource.getType() == (ResourceType.UDF)) {
List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray);
if (CollectionUtils.isNotEmpty(udfFuncs)) {
logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
return result;
}
}
if (resourceIdSet.contains(resource.getPid())) {
logger.error("can't be deleted,because it is used of process definition");
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
resourceIdSet.retainAll(allChildren);
if (CollectionUtils.isNotEmpty(resourceIdSet)) {
logger.error("can't be deleted,because it is used of process definition");
for (Integer resId : resourceIdSet) {
logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
}
putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
// get hdfs file by type
String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
resourcesMapper.deleteIds(needDeleteResourceIdArray);
resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray);
//delete file on hdfs
HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
}
/**
* verify resource by name and type
* @param loginUser login user
* @param fullName resource full name
* @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
if(tenant != null){
String tenantCode = tenant.getTenantCode();
try {
String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
} catch (Exception e) {
logger.error(e.getMessage(),e);
putMsg(result,Status.HDFS_OPERATION_ERROR);
}
}else{
putMsg(result,Status.TENANT_NOT_EXIST);
}
}
return result;
}
/**
* verify resource by full name or pid and type
* @param fullName resource full name
* @param id resource id
* @param type resource type
* @return true if the resource full name or pid not exists, otherwise return false
*/
public Result queryResource(String fullName,Integer id,ResourceType type) {
Result result = new Result();
if (StringUtils.isBlank(fullName) && id == null) {
logger.error("You must input one of fullName and pid");
putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
return result;
}
if (StringUtils.isNotBlank(fullName)) {
List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
if (CollectionUtils.isEmpty(resourceList)) {
logger.error("resource file not exist, resource full name {} ", fullName);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(resourceList.get(0));
} else {
Resource resource = resourcesMapper.selectById(id);
if (resource == null) {
logger.error("resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
Resource parentResource = resourcesMapper.selectById(resource.getPid());
if (parentResource == null) {
logger.error("parent resource file not exist, resource id {}", id);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
putMsg(result, Status.SUCCESS);
result.setData(parentResource);
}
return result;
}
/**
* view resource file online
*
* @param resourceId resource id
* @param skipLineNum skip line number
* @param limit limit
* @return resource content
*/
public Result readResource(int resourceId, int skipLineNum, int limit) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check preview or not by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
// hdfs path
String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit);
putMsg(result, Status.SUCCESS);
Map<String, Object> map = new HashMap<>();
map.put(ALIAS, resource.getAlias());
map.put(CONTENT, String.join("\n", content));
result.setData(map);
}else{
logger.error("read file {} not exist in hdfs", hdfsFileName);
putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName);
}
} catch (Exception e) {
logger.error("Resource {} read failed", hdfsFileName, e);
putMsg(result, Status.HDFS_OPERATION_ERROR);
}
return result;
}
/**
* create resource file online
*
* @param loginUser login user
* @param type resource type
* @param fileName file name
* @param fileSuffix file suffix
* @param desc description
* @param content content
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
//check file suffix
String nameSuffix = fileSuffix.trim();
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resouce suffix {} not support create", nameSuffix);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String name = fileName.trim() + "." + nameSuffix;
String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
putMsg(result, Status.SUCCESS);
Map<Object, Object> dataMap = new BeanMap(resource);
Map<String, Object> resultMap = new HashMap<>();
for (Map.Entry<Object, Object> entry: dataMap.entrySet()) {
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) {
resultMap.put(entry.getKey().toString(), entry.getValue());
}
}
result.setData(resultMap);
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* updateProcessInstance resource
*
* @param resourceId resource id
* @param content content
* @return update result cod
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResourceContent(int resourceId, String content) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
putMsg(result, Status.HDFS_NOT_STARTUP);
return result;
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("read file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
//check can edit by file suffix
String nameSuffix = FileUtils.suffix(resource.getAlias());
String resourceViewSuffixs = FileUtils.getResourceViewSuffixs();
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List<String> strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
}
String tenantCode = getTenantCode(resource.getUserId(),result);
if (StringUtils.isEmpty(tenantCode)){
return result;
}
resource.setSize(content.getBytes().length);
resource.setUpdateTime(new Date());
resourcesMapper.updateById(resource);
result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
return result;
}
/**
* @param resourceName resource name
* @param tenantCode tenant code
* @param content content
* @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
String localFilename = "";
String hdfsFileName = "";
try {
localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString());
if (!FileUtils.writeContent2File(content, localFilename)) {
// write file fail
logger.error("file {} fail, content is {}", localFilename, content);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
// get resource file hdfs path
hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils hadoopUtils = HadoopUtils.getInstance();
if (!hadoopUtils.exists(resourcePath)) {
// create if tenant dir not exists
createTenantDirIfNotExists(tenantCode);
}
if (hadoopUtils.exists(hdfsFileName)) {
hadoopUtils.delete(hdfsFileName, false);
}
hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true);
} catch (Exception e) {
logger.error(e.getMessage(), e);
result.setCode(Status.HDFS_OPERATION_ERROR.getCode());
result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName));
return result;
}
putMsg(result, Status.SUCCESS);
return result;
}
/**
* download file
*
* @param resourceId resource id
* @return resource content
* @throws Exception exception
*/
public org.springframework.core.io.Resource downloadResource(int resourceId) throws Exception {
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
throw new RuntimeException("hdfs not startup");
}
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
if (resource.isDirectory()) {
logger.error("resource id {} is directory,can't download it", resourceId);
throw new RuntimeException("cant't download directory");
}
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
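        // note: the HDFS path below is derived from the resource alias (the display name), not from the resource full name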
String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true);
return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName);
}
/**
* list all file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> authorizeResourceTree(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<ResourceComponent> list ;
if (CollectionUtils.isNotEmpty(resourceList)) {
Visitor visitor = new ResourceTreeVisitor(resourceList);
list = visitor.visit().getChildren();
}else {
list = new ArrayList<>(0);
}
result.put(Constants.DATA_LIST, list);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized file
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId);
List<Resource> list ;
if (resourceList != null && resourceList.size() > 0) {
Set<Resource> resourceSet = new HashSet<>(resourceList);
List<Resource> authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
getAuthorizedResourceList(resourceSet, authedResourceList);
list = new ArrayList<>(resourceSet);
}else {
list = new ArrayList<>(0);
}
Visitor visitor = new ResourceTreeVisitor(list);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* unauthorized udf function
*
* @param loginUser login user
* @param userId user id
* @return unauthorized result code
*/
public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
//only admin can operate
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId);
List<UdfFunc> resultList = new ArrayList<>();
Set<UdfFunc> udfFuncSet = null;
if (CollectionUtils.isNotEmpty(udfFuncList)) {
udfFuncSet = new HashSet<>(udfFuncList);
List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId);
getAuthorizedResourceList(udfFuncSet, authedUDFFuncList);
resultList = new ArrayList<>(udfFuncSet);
}
result.put(Constants.DATA_LIST, resultList);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized udf function
*
* @param loginUser login user
* @param userId user id
* @return authorized result code
*/
public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>();
if (checkAdmin(loginUser, result)) {
return result;
}
List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId);
result.put(Constants.DATA_LIST, udfFuncs);
putMsg(result,Status.SUCCESS);
return result;
}
/**
* authorized file
*
* @param loginUser login user
* @param userId user id
* @return authorized result
*/
public Map<String, Object> authorizedFile(User loginUser, Integer userId) {
Map<String, Object> result = new HashMap<>(5);
if (checkAdmin(loginUser, result)){
return result;
}
List<Resource> authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
Visitor visitor = new ResourceTreeVisitor(authedResources);
logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
logger.info(jsonTreeStr);
result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
/**
* get authorized resource list
*
* @param resourceSet resource set
* @param authedResourceList authorized resource list
*/
private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) {
Set<?> authedResourceSet = null;
if (CollectionUtils.isNotEmpty(authedResourceList)) {
authedResourceSet = new HashSet<>(authedResourceList);
resourceSet.removeAll(authedResourceSet);
}
}
/**
* get tenantCode by UserId
*
* @param userId user id
* @param result return result
* @return
*/
private String getTenantCode(int userId,Result result){
User user = userMapper.queryDetailsById(userId);
if(user == null){
logger.error("user {} not exists", userId);
putMsg(result, Status.USER_NOT_EXIST,userId);
return null;
}
Tenant tenant = tenantMapper.queryById(user.getTenantId());
if (tenant == null){
logger.error("tenant not exists");
putMsg(result, Status.TENANT_NOT_EXIST);
return null;
}
return tenant.getTenantCode();
}
/**
* list all children id
* @param resource resource
* @param containSelf whether add self to children list
* @return all children id
*/
List<Integer> listAllChildren(Resource resource,boolean containSelf){
List<Integer> childList = new ArrayList<>();
if (resource.getId() != -1 && containSelf) {
childList.add(resource.getId());
}
if(resource.isDirectory()){
listAllChildren(resource.getId(),childList);
}
return childList;
}
/**
* list all children id
* @param resourceId resource id
* @param childList child list
*/
void listAllChildren(int resourceId,List<Integer> childList){
List<Integer> children = resourcesMapper.listChildren(resourceId);
        for (int childId : children) {
            childList.add(childId);
            listAllChildren(childId, childList);
}
}
}
|
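
The ResourcesService above rebuilds a resource's full name from the current directory and the short name with the same expression in several places (directory creation, upload, online creation). A small standalone sketch of that convention, with illustrative names only:

```java
public class FullNameSketch {
    // Mirrors the repeated expression in the service above:
    // currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name)
    static String buildFullName(String currentDir, String name) {
        return "/".equals(currentDir)
                ? String.format("%s%s", currentDir, name)   // root:   "/" + "a.sh"  -> "/a.sh"
                : String.format("%s/%s", currentDir, name); // nested: "/dir" + "a.sh" -> "/dir/a.sh"
    }

    public static void main(String[] args) {
        System.out.println(buildFullName("/", "test_1.sh"));        // /test_1.sh
        System.out.println(buildFullName("/scripts", "test_1.sh")); // /scripts/test_1.sh
    }
}
```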
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,634 | Process shows success,when the task of the process is killed in the terminal | Steps to reproduce:
1. There are two workflows, AA and BB, and BB depends on AA.
2. AA contains the tasks test1 and test2, and test2 depends on test1.
3. BB contains a dependent task and a test3 task; the dependent task is configured against test2 of AA, and test3 depends on the dependent task.
4. Run AA manually (test1 can be a shell task that runs a test1.sh file containing something like sleep 60s), then kill that task in the terminal of the worker executing it (killed manually here; in real production, when the server is under heavy load, it may well be killed by another service).
5. At this point workflow AA is shown as successful, the test1 task in AA is in the kill state, and the test2 task did not run.
6. Run BB manually; both the dependent task and the test3 task in BB run successfully.
The correct behaviour is that after test1 in AA is killed, AA should be in the failure state rather than the success state, and BB should fail because its dependent task fails.
--------------------------------------------------------------------------------------
Reproduction steps:
1. There are two workflows AA and BB, and BB depends on AA.
2. There are test1 and test2 tasks in AA, and test2 tasks depend on test1.
3. There are DEPENDENCT and test3 tasks in BB. DEPENDENCT configures test2 of AA, and test3 depends on the DEPENDENCT.
4. Now run AA manually (test1 task can configure the shell, and a test1.sh file can be run in the shell, and a sleep 60s can be written in the file). Kill the task at the terminal executing the worker (in this case, kill manually. In actual production, when the server is under great pressure, it is likely to be killed by other services).
5. At this time, the workflow AA runs successfully, the test1 task in AA is in kill status, and the test2 task is not running.
6. Run BB manually, and you can see that the DEPENDENCT and test3 tasks in BB run successfully.
I think the correct scenario is that after test1 in AA is killed, AA should be in failure state instead of success state. BB fails due to the DEPENDENCT failure | https://github.com/apache/dolphinscheduler/issues/2634 | https://github.com/apache/dolphinscheduler/pull/2731 | 9ffcd82447cd355955d110fe19afb33f465f9d7e | a9da86abb4545b1ac8fdc5cdd05e85bdcdffa9c6 | "2020-05-08T06:20:02Z" | java | "2020-05-18T10:27:16Z" | dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/ExecutionStatus.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.enums;
import com.baomidou.mybatisplus.annotation.EnumValue;
/**
* running status for workflow and task nodes
*
*/
public enum ExecutionStatus {
/**
* status:
* 0 submit success
* 1 running
* 2 ready pause
* 3 pause
* 4 ready stop
* 5 stop
* 6 failure
* 7 success
* 8 need fault tolerance
* 9 kill
* 10 waiting thread
* 11 waiting depend node complete
*/
SUBMITTED_SUCCESS(0, "submit success"),
RUNNING_EXEUTION(1, "running"),
READY_PAUSE(2, "ready pause"),
PAUSE(3, "pause"),
READY_STOP(4, "ready stop"),
STOP(5, "stop"),
FAILURE(6, "failure"),
SUCCESS(7, "success"),
NEED_FAULT_TOLERANCE(8, "need fault tolerance"),
KILL(9, "kill"),
WAITTING_THREAD(10, "waiting thread"),
WAITTING_DEPEND(11, "waiting depend node complete");
ExecutionStatus(int code, String descp){
this.code = code;
this.descp = descp;
}
@EnumValue
private final int code;
private final String descp;
/**
* status is success
* @return status
*/
public boolean typeIsSuccess(){
return this == SUCCESS;
}
/**
* status is failure
* @return status
*/
public boolean typeIsFailure(){
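        // NEED_FAULT_TOLERANCE and KILL are both counted as failure-type states here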
return this == FAILURE || this == NEED_FAULT_TOLERANCE || this == KILL;
}
/**
* status is finished
* @return status
*/
public boolean typeIsFinished(){
return typeIsSuccess() || typeIsFailure() || typeIsCancel() || typeIsPause()
|| typeIsWaittingThread();
}
/**
* status is waiting thread
* @return status
*/
public boolean typeIsWaittingThread(){
return this == WAITTING_THREAD;
}
/**
* status is pause
* @return status
*/
public boolean typeIsPause(){
return this == PAUSE;
}
/**
* status is running
* @return status
*/
public boolean typeIsRunning(){
return this == RUNNING_EXEUTION || this == WAITTING_DEPEND;
}
/**
* status is cancel
* @return status
*/
public boolean typeIsCancel(){
return this == KILL || this == STOP ;
}
public int getCode() {
return code;
}
public String getDescp() {
return descp;
}
public static ExecutionStatus of(int status){
for(ExecutionStatus es : values()){
if(es.getCode() == status){
return es;
}
}
throw new IllegalArgumentException("invalid status : " + status);
}
}
|
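
A small usage sketch (assuming the ExecutionStatus enum above is on the classpath) showing how its helper methods classify a killed task, the state at the centre of the report above: the enum itself already treats KILL as a finished, failure-type state.

```java
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;

public class StatusCheckSketch {
    public static void main(String[] args) {
        ExecutionStatus killed = ExecutionStatus.KILL;
        System.out.println(killed.typeIsFinished()); // true  - KILL is a terminal state
        System.out.println(killed.typeIsFailure());  // true  - grouped with FAILURE and NEED_FAULT_TOLERANCE
        System.out.println(killed.typeIsSuccess());  // false
    }
}
```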
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,634 | Process shows success,when the task of the process is killed in the terminal | Steps to reproduce:
1. There are two workflows, AA and BB, and BB depends on AA.
2. AA contains the tasks test1 and test2, and test2 depends on test1.
3. BB contains a dependent task and a test3 task; the dependent task is configured against test2 of AA, and test3 depends on the dependent task.
4. Run AA manually (test1 can be a shell task that runs a test1.sh file containing something like sleep 60s), then kill that task in the terminal of the worker executing it (killed manually here; in real production, when the server is under heavy load, it may well be killed by another service).
5. At this point workflow AA is shown as successful, the test1 task in AA is in the kill state, and the test2 task did not run.
6. Run BB manually; both the dependent task and the test3 task in BB run successfully.
The correct behaviour is that after test1 in AA is killed, AA should be in the failure state rather than the success state, and BB should fail because its dependent task fails.
--------------------------------------------------------------------------------------
Reproduction steps:
1. There are two workflows AA and BB, and BB depends on AA.
2. There are test1 and test2 tasks in AA, and test2 tasks depend on test1.
3. There are DEPENDENCT and test3 tasks in BB. DEPENDENCT configures test2 of AA, and test3 depends on the DEPENDENCT.
4. Now run AA manually (test1 task can configure the shell, and a test1.sh file can be run in the shell, and a sleep 60s can be written in the file). Kill the task at the terminal executing the worker (in this case, kill manually. In actual production, when the server is under great pressure, it is likely to be killed by other services).
5. At this time, the workflow AA runs successfully, the test1 task in AA is in kill status, and the test2 task is not running.
6. Run BB manually, and you can see that the DEPENDENCT and test3 tasks in BB run successfully.
I think the correct scenario is that after test1 in AA is killed, AA should be in failure state instead of success state. BB fails due to the DEPENDENCT failure | https://github.com/apache/dolphinscheduler/issues/2634 | https://github.com/apache/dolphinscheduler/pull/2731 | 9ffcd82447cd355955d110fe19afb33f465f9d7e | a9da86abb4545b1ac8fdc5cdd05e85bdcdffa9c6 | "2020-05-08T06:20:02Z" | java | "2020-05-18T10:27:16Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import com.alibaba.fastjson.JSON;
import com.google.common.collect.Lists;
import org.apache.commons.io.FileUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.*;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.task.conditions.ConditionsParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.NettyRemotingClient;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.utils.AlertManager;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import static org.apache.dolphinscheduler.common.Constants.*;
/**
* master exec thread,split dag
*/
public class MasterExecThread implements Runnable {
/**
* logger of MasterExecThread
*/
private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class);
/**
* process instance
*/
private ProcessInstance processInstance;
/**
     * running TaskNode
*/
private final Map<MasterBaseTaskExecThread,Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>();
/**
* task exec service
*/
private final ExecutorService taskExecService;
/**
* submit failure nodes
*/
private boolean taskFailedSubmit = false;
/**
* recover node id list
*/
private List<TaskInstance> recoverNodeIdList = new ArrayList<>();
/**
* error task list
*/
private Map<String,TaskInstance> errorTaskList = new ConcurrentHashMap<>();
/**
* complete task list
*/
private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();
/**
* ready to submit task list
*/
private Map<String, TaskInstance> readyToSubmitTaskList = new ConcurrentHashMap<>();
/**
* depend failed task map
*/
private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();
/**
* forbidden task map
*/
private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();
/**
* skip task map
*/
private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();
/**
* recover tolerance fault task list
*/
private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();
/**
* alert manager
*/
private AlertManager alertManager = new AlertManager();
/**
* the object of DAG
*/
private DAG<String,TaskNode,TaskNodeRelation> dag;
/**
* process service
*/
private ProcessService processService;
/**
* master config
*/
private MasterConfig masterConfig;
/**
     * netty remoting client
*/
private NettyRemotingClient nettyRemotingClient;
/**
* constructor of MasterExecThread
* @param processInstance processInstance
* @param processService processService
* @param nettyRemotingClient nettyRemotingClient
*/
public MasterExecThread(ProcessInstance processInstance, ProcessService processService, NettyRemotingClient nettyRemotingClient){
this.processService = processService;
this.processInstance = processInstance;
this.masterConfig = SpringApplicationContext.getBean(MasterConfig.class);
int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread",
masterTaskExecNum);
this.nettyRemotingClient = nettyRemotingClient;
}
@Override
public void run() {
// process instance is null
if (processInstance == null){
            logger.info("process instance does not exist");
return;
}
// check to see if it's done
if (processInstance.getState().typeIsFinished()){
logger.info("process instance is done : {}",processInstance.getId());
return;
}
try {
if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()){
// sub process complement data
executeComplementProcess();
}else{
// execute flow
executeProcess();
}
}catch (Exception e){
logger.error("master exec thread exception", e);
logger.error("process execute failed, process id:{}", processInstance.getId());
processInstance.setState(ExecutionStatus.FAILURE);
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
}finally {
taskExecService.shutdown();
// post handle
postHandle();
}
}
/**
* execute process
* @throws Exception exception
*/
private void executeProcess() throws Exception {
prepareProcess();
runProcess();
endProcess();
}
/**
* execute complement process
* @throws Exception exception
*/
private void executeComplementProcess() throws Exception {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
processService.saveProcessInstance(processInstance);
// get schedules
int processDefinitionId = processInstance.getProcessDefinitionId();
List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId);
List<Date> listDate = Lists.newLinkedList();
if(!CollectionUtils.isEmpty(schedules)){
for (Schedule schedule : schedules) {
listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab()));
}
}
// get first fire date
Iterator<Date> iterator = null;
Date scheduleDate = null;
if(!CollectionUtils.isEmpty(listDate)) {
iterator = listDate.iterator();
scheduleDate = iterator.next();
processInstance.setScheduleTime(scheduleDate);
processService.updateProcessInstance(processInstance);
}else{
scheduleDate = processInstance.getScheduleTime();
if(scheduleDate == null){
scheduleDate = startDate;
}
}
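        // complement loop: run the whole DAG once per schedule date until the end date is reached or a run fails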
while(Stopper.isRunning()){
// prepare dag and other info
prepareProcess();
if(dag == null){
logger.error("process {} dag is null, please check out parameters",
processInstance.getId());
processInstance.setState(ExecutionStatus.SUCCESS);
processService.updateProcessInstance(processInstance);
return;
}
            // execute process, waiting for the end
runProcess();
            // process instance failed, no more complement runs
if(!processInstance.getState().typeIsSuccess()){
logger.info("process {} state {}, complement not completely!",
processInstance.getId(), processInstance.getState());
break;
}
            // current process instance succeeded, execute the next one
if(null == iterator){
// loop by day
scheduleDate = DateUtils.getSomeDay(scheduleDate, 1);
if(scheduleDate.after(endDate)){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
}else{
// loop by schedule date
if(!iterator.hasNext()){
// all success
logger.info("process {} complement completely!", processInstance.getId());
break;
}
scheduleDate = iterator.next();
}
logger.info("process {} start to complement {} data",
processInstance.getId(), DateUtils.dateToString(scheduleDate));
// execute next process instance complement data
processInstance.setScheduleTime(scheduleDate);
if(cmdParam.containsKey(Constants.CMDPARAM_RECOVERY_START_NODE_STRING)){
cmdParam.remove(Constants.CMDPARAM_RECOVERY_START_NODE_STRING);
processInstance.setCommandParam(JSONUtils.toJson(cmdParam));
}
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
taskInstance.setFlag(Flag.NO);
processService.updateTaskInstance(taskInstance);
}
processInstance.setState(ExecutionStatus.RUNNING_EXEUTION);
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
processInstance.getProcessDefinition().getGlobalParamMap(),
processInstance.getProcessDefinition().getGlobalParamList(),
CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
processService.saveProcessInstance(processInstance);
}
// flow end
endProcess();
}
/**
* prepare process parameter
* @throws Exception exception
*/
private void prepareProcess() throws Exception {
// init task queue
initTaskQueue();
// gen process dag
buildFlowDag();
logger.info("prepare process :{} end", processInstance.getId());
}
/**
* process end handle
*/
private void endProcess() {
processInstance.setEndTime(new Date());
processService.updateProcessInstance(processInstance);
if(processInstance.getState().typeIsWaittingThread()){
processService.createRecoveryWaitingThreadCommand(null, processInstance);
}
List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
alertManager.sendAlertProcessInstance(processInstance, taskInstances);
}
/**
* generate process dag
* @throws Exception exception
*/
private void buildFlowDag() throws Exception {
recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
forbiddenTaskList = DagHelper.getForbiddenTaskNodeMaps(processInstance.getProcessInstanceJson());
// generate process to get DAG info
List<String> recoveryNameList = getRecoveryNodeNameList();
List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
ProcessDag processDag = generateFlowDag(processInstance.getProcessInstanceJson(),
startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
if(processDag == null){
logger.error("processDag is null");
return;
}
// generate process dag
dag = DagHelper.buildDagGraph(processDag);
}
/**
* init task queue
*/
private void initTaskQueue(){
taskFailedSubmit = false;
activeTaskNode.clear();
dependFailedTask.clear();
completeTaskList.clear();
errorTaskList.clear();
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
for(TaskInstance task : taskInstanceList){
if(task.isTaskComplete()){
completeTaskList.put(task.getName(), task);
}
if(task.getState().typeIsFailure() && !task.taskCanRetry()){
errorTaskList.put(task.getName(), task);
}
}
}
/**
* process post handle
*/
private void postHandle() {
logger.info("develop mode is: {}", CommonUtils.isDevelopMode());
if (!CommonUtils.isDevelopMode()) {
// get exec dir
String execLocalPath = org.apache.dolphinscheduler.common.utils.FileUtils
.getProcessExecDir(processInstance.getProcessDefinition().getProjectId(),
processInstance.getProcessDefinitionId(),
processInstance.getId());
try {
FileUtils.deleteDirectory(new File(execLocalPath));
} catch (IOException e) {
logger.error("delete exec dir failed ", e);
}
}
}
/**
* submit task to execute
* @param taskInstance task instance
* @return TaskInstance
*/
private TaskInstance submitTaskExec(TaskInstance taskInstance) {
MasterBaseTaskExecThread abstractExecThread = null;
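        // choose the monitoring thread implementation that matches the task type (sub-process, dependent, conditions or ordinary task)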
if(taskInstance.isSubProcess()){
abstractExecThread = new SubProcessTaskExecThread(taskInstance);
}else if(taskInstance.isDependTask()){
abstractExecThread = new DependentTaskExecThread(taskInstance);
}else if(taskInstance.isConditionsTask()){
abstractExecThread = new ConditionsTaskExecThread(taskInstance);
}else {
abstractExecThread = new MasterTaskExecThread(taskInstance);
}
Future<Boolean> future = taskExecService.submit(abstractExecThread);
activeTaskNode.putIfAbsent(abstractExecThread, future);
return abstractExecThread.getTaskInstance();
}
/**
* find task instance in db.
     * guards against submitting more than one task with the same name at the same time.
* @param taskName task name
* @return TaskInstance
*/
private TaskInstance findTaskIfExists(String taskName){
List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
for(TaskInstance taskInstance : taskInstanceList){
if(taskInstance.getName().equals(taskName)){
return taskInstance;
}
}
return null;
}
/**
     * create (or reuse) the task instance for a node
     * @param processInstance process instance
     * @param nodeName node name
     * @param taskNode task node
* @return TaskInstance
*/
private TaskInstance createTaskInstance(ProcessInstance processInstance, String nodeName,
TaskNode taskNode) {
TaskInstance taskInstance = findTaskIfExists(nodeName);
if(taskInstance == null){
taskInstance = new TaskInstance();
// task name
taskInstance.setName(nodeName);
// process instance define id
taskInstance.setProcessDefinitionId(processInstance.getProcessDefinitionId());
// task instance state
taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
// process instance id
taskInstance.setProcessInstanceId(processInstance.getId());
// task instance node json
taskInstance.setTaskJson(JSON.toJSONString(taskNode));
// task instance type
taskInstance.setTaskType(taskNode.getType());
// task instance whether alert
taskInstance.setAlertFlag(Flag.NO);
// task instance start time
taskInstance.setStartTime(new Date());
// task instance flag
taskInstance.setFlag(Flag.YES);
// task instance retry times
taskInstance.setRetryTimes(0);
// max task instance retry times
taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
// retry task instance interval
taskInstance.setRetryInterval(taskNode.getRetryInterval());
// task instance priority
if(taskNode.getTaskInstancePriority() == null){
taskInstance.setTaskInstancePriority(Priority.MEDIUM);
}else{
taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
}
String processWorkerGroup = processInstance.getWorkerGroup();
processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();
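        // worker group inheritance: a task left on the default group follows a non-default process-level group; otherwise the task-level group (or its fallback) is used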
if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
taskInstance.setWorkerGroup(processWorkerGroup);
}else {
taskInstance.setWorkerGroup(taskWorkerGroup);
}
}
return taskInstance;
}
/**
* if all of the task dependence are skip, skip it too.
* @param taskNode
* @return
*/
private boolean isTaskNodeNeedSkip(TaskNode taskNode){
if(CollectionUtils.isEmpty(taskNode.getDepList())){
return false;
}
for(String depNode : taskNode.getDepList()){
if(!skipTaskNodeList.containsKey(depNode)){
return false;
}
}
return true;
}
/**
* set task node skip if dependence all skip
* @param taskNodesSkipList
*/
private void setTaskNodeSkip(List<String> taskNodesSkipList){
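        // mark each node as skipped, then recurse into successors whose dependencies have now all been skipped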
for(String skipNode : taskNodesSkipList){
skipTaskNodeList.putIfAbsent(skipNode, dag.getNode(skipNode));
Collection<String> postNodeList = DagHelper.getStartVertex(skipNode, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
for(String post : postNodeList){
TaskNode postNode = dag.getNode(post);
if(isTaskNodeNeedSkip(postNode)){
postSkipList.add(post);
}
}
setTaskNodeSkip(postSkipList);
}
}
/**
* parse condition task find the branch process
* set skip flag for another one.
* @param nodeName
* @return
*/
private List<String> parseConditionTask(String nodeName){
List<String> conditionTaskList = new ArrayList<>();
TaskNode taskNode = dag.getNode(nodeName);
if(!taskNode.isConditionsTask()){
return conditionTaskList;
}
ConditionsParameters conditionsParameters =
JSONUtils.parseObject(taskNode.getConditionResult(), ConditionsParameters.class);
TaskInstance taskInstance = completeTaskList.get(nodeName);
if(taskInstance == null){
logger.error("task instance {} cannot find, please check it!", nodeName);
return conditionTaskList;
}
if(taskInstance.getState().typeIsSuccess()){
conditionTaskList = conditionsParameters.getSuccessNode();
setTaskNodeSkip(conditionsParameters.getFailedNode());
}else if(taskInstance.getState().typeIsFailure()){
conditionTaskList = conditionsParameters.getFailedNode();
setTaskNodeSkip(conditionsParameters.getSuccessNode());
}else{
conditionTaskList.add(nodeName);
}
return conditionTaskList;
}
/**
     * parse the post node list of the previous node
     * if it is a conditions node: return the branch selected by its result
     * if a post node is already completed, return the post nodes of that completed node
* @param previousNodeName
* @return
*/
private List<String> parsePostNodeList(String previousNodeName){
List<String> postNodeList = new ArrayList<>();
TaskNode taskNode = dag.getNode(previousNodeName);
if(taskNode != null && taskNode.isConditionsTask()){
return parseConditionTask(previousNodeName);
}
Collection<String> postNodeCollection = DagHelper.getStartVertex(previousNodeName, dag, completeTaskList);
List<String> postSkipList = new ArrayList<>();
        // skip already-successful nodes and recurse into their post nodes
        // if conditions node:
        // 1. follow the branch selected by the conditions result
        // 2. set the skip flag on the other branch
for(String postNode : postNodeCollection){
if(completeTaskList.containsKey(postNode)){
TaskInstance postTaskInstance = completeTaskList.get(postNode);
if(dag.getNode(postNode).isConditionsTask()){
List<String> conditionTaskNodeList = parseConditionTask(postNode);
for(String conditions : conditionTaskNodeList){
postNodeList.addAll(parsePostNodeList(conditions));
}
}else if(postTaskInstance.getState().typeIsSuccess()){
postNodeList.addAll(parsePostNodeList(postNode));
}else{
postNodeList.add(postNode);
}
}else if(isTaskNodeNeedSkip(dag.getNode(postNode))){
postSkipList.add(postNode);
setTaskNodeSkip(postSkipList);
postSkipList.clear();
}else{
postNodeList.add(postNode);
}
}
return postNodeList;
}
/**
* submit post node
* @param parentNodeName parent node name
*/
private void submitPostNode(String parentNodeName){
List<String> submitTaskNodeList = parsePostNodeList(parentNodeName);
List<TaskInstance> taskInstances = new ArrayList<>();
for(String taskNode : submitTaskNodeList){
taskInstances.add(createTaskInstance(processInstance, taskNode,
dag.getNode(taskNode)));
}
// if previous node success , post node submit
for(TaskInstance task : taskInstances){
if(readyToSubmitTaskList.containsKey(task.getName())){
continue;
}
if(completeTaskList.containsKey(task.getName())){
logger.info("task {} has already run success", task.getName());
continue;
}
if(task.getState().typeIsPause() || task.getState().typeIsCancel()){
logger.info("task {} stopped, the state is {}", task.getName(), task.getState());
}else{
addTaskToStandByList(task);
}
}
}
/**
* determine whether the dependencies of the task node are complete
* @return DependResult
*/
private DependResult isTaskDepsComplete(String taskName) {
Collection<String> startNodes = dag.getBeginNode();
        // if it is a start vertex of the DAG, its dependencies are trivially satisfied
if(startNodes.contains(taskName)){
return DependResult.SUCCESS;
}
TaskNode taskNode = dag.getNode(taskName);
List<String> depNameList = taskNode.getDepList();
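        // dependencies that are missing from the DAG, forbidden, or skipped are ignored; the remaining ones must all be complete and not failed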
for(String depsNode : depNameList ){
if(!dag.containsNode(depsNode)
|| forbiddenTaskList.containsKey(depsNode)
|| skipTaskNodeList.containsKey(depsNode)){
continue;
}
// dependencies must be fully completed
if(!completeTaskList.containsKey(depsNode)){
return DependResult.WAITING;
}
ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState();
            // a failed dependency does not fail this task when it is a conditions task or is followed by one
if(depTaskState.typeIsFailure()
&& !DagHelper.haveConditionsAfterNode(depsNode, dag )
&& !dag.getNode(depsNode).isConditionsTask()){
return DependResult.FAILED;
}
if(depTaskState.typeIsPause() || depTaskState.typeIsCancel()){
return DependResult.WAITING;
}
}
logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray()));
return DependResult.SUCCESS;
}
/**
* query task instance by complete state
* @param state state
* @return task instance list
*/
private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state){
List<TaskInstance> resultList = new ArrayList<>();
for (Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
if(entry.getValue().getState() == state){
resultList.add(entry.getValue());
}
}
return resultList;
}
/**
     * instance state when there are still ongoing tasks
* @param state state
* @return ExecutionStatus
*/
private ExecutionStatus runningState(ExecutionStatus state){
if(state == ExecutionStatus.READY_STOP ||
state == ExecutionStatus.READY_PAUSE ||
state == ExecutionStatus.WAITTING_THREAD){
// if the running task is not completed, the state remains unchanged
return state;
}else{
return ExecutionStatus.RUNNING_EXEUTION;
}
}
/**
     * whether a failed task exists: submit failure, dependency failure, or execution failure (after retries)
*
* @return Boolean whether has failed task
*/
private boolean hasFailedTask(){
if(this.taskFailedSubmit){
return true;
}
if(this.errorTaskList.size() > 0){
return true;
}
return this.dependFailedTask.size() > 0;
}
/**
* process instance failure
*
* @return Boolean whether process instance failed
*/
private boolean processFailed(){
if(hasFailedTask()) {
if(processInstance.getFailureStrategy() == FailureStrategy.END){
return true;
}
if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) {
return readyToSubmitTaskList.size() == 0 || activeTaskNode.size() == 0;
}
}
return false;
}
/**
     * whether any completed task is in the waiting-thread state
* @return Boolean whether has waiting thread task
*/
private boolean hasWaitingThreadTask(){
List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITTING_THREAD);
return CollectionUtils.isNotEmpty(waitingList);
}
/**
* prepare for pause
     * 1. if a failed retry task exists in the standby queue, return failure directly
     * 2. if a paused task exists, complement data is not finished, or tasks are still pending submission, return pause
     * 3. otherwise return success
* @return ExecutionStatus
*/
private ExecutionStatus processReadyPause(){
if(hasRetryTaskInStandBy()){
return ExecutionStatus.FAILURE;
}
List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE);
if(CollectionUtils.isNotEmpty(pauseList)
|| !isComplementEnd()
|| readyToSubmitTaskList.size() > 0){
return ExecutionStatus.PAUSE;
}else{
return ExecutionStatus.SUCCESS;
}
}
/**
* generate the latest process instance status by the tasks state
* @return process instance execution status
*/
private ExecutionStatus getProcessInstanceState(){
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
ExecutionStatus state = instance.getState();
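        // the checks below are ordered by priority: running > failure > waiting thread > pause > stop > success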
if(activeTaskNode.size() > 0 || haveRetryTaskStandBy()){
return runningState(state);
}
// process failure
if(processFailed()){
return ExecutionStatus.FAILURE;
}
// waiting thread
if(hasWaitingThreadTask()){
return ExecutionStatus.WAITTING_THREAD;
}
// pause
if(state == ExecutionStatus.READY_PAUSE){
return processReadyPause();
}
// stop
if(state == ExecutionStatus.READY_STOP){
List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP);
List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL);
if(CollectionUtils.isNotEmpty(stopList)
|| CollectionUtils.isNotEmpty(killList) || !isComplementEnd()){
return ExecutionStatus.STOP;
}else{
return ExecutionStatus.SUCCESS;
}
}
// success
if(state == ExecutionStatus.RUNNING_EXEUTION){
if(readyToSubmitTaskList.size() > 0){
                // tasks are still pending submission, meaning their dependencies are still being waited on
return ExecutionStatus.RUNNING_EXEUTION;
}else{
// if the waiting queue is empty and the status is in progress, then success
return ExecutionStatus.SUCCESS;
}
}
return state;
}
/**
* whether standby task list have retry tasks
* @return
*/
private boolean haveRetryTaskStandBy() {
boolean result = false;
for(String taskName : readyToSubmitTaskList.keySet()){
TaskInstance task = readyToSubmitTaskList.get(taskName);
if(task.getState().typeIsFailure()){
result = true;
break;
}
}
return result;
}
/**
* whether complement end
* @return Boolean whether is complement end
*/
private boolean isComplementEnd() {
if(!processInstance.isComplementData()){
return true;
}
try {
Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE));
return processInstance.getScheduleTime().equals(endTime);
} catch (Exception e) {
logger.error("complement end failed ",e);
return false;
}
}
/**
     * update the process instance state
* after each batch of tasks is executed, the status of the process instance is updated
*/
private void updateProcessInstanceState() {
ExecutionStatus state = getProcessInstanceState();
if(processInstance.getState() != state){
logger.info(
"work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}",
processInstance.getId(), processInstance.getName(),
processInstance.getState(), state,
processInstance.getCommandType());
ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId());
instance.setState(state);
instance.setProcessDefinition(processInstance.getProcessDefinition());
processService.updateProcessInstance(instance);
processInstance = instance;
}
}
/**
* get task dependency result
* @param taskInstance task instance
* @return DependResult
*/
private DependResult getDependResultForTask(TaskInstance taskInstance){
return isTaskDepsComplete(taskInstance.getName());
}
/**
* add task to standby list
* @param taskInstance task instance
*/
private void addTaskToStandByList(TaskInstance taskInstance){
logger.info("add task to stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.putIfAbsent(taskInstance.getName(), taskInstance);
}
/**
* remove task from stand by list
* @param taskInstance task instance
*/
private void removeTaskFromStandbyList(TaskInstance taskInstance){
logger.info("remove task from stand by list: {}", taskInstance.getName());
readyToSubmitTaskList.remove(taskInstance.getName());
}
/**
* has retry task in standby
* @return Boolean whether has retry task in standby
*/
private boolean hasRetryTaskInStandBy(){
for (Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
if(entry.getValue().getState().typeIsFailure()){
return true;
}
}
return false;
}
/**
     * submit and watch the tasks until the workflow stops
*/
private void runProcess(){
// submit start node
submitPostNode(null);
boolean sendTimeWarning = false;
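        // main scheduling loop: reap finished task threads, handle failures and retries, submit standby tasks and refresh the process instance state until the instance stops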
while(!processInstance.isProcessInstanceStop()){
// send warning email if process time out.
if(!sendTimeWarning && checkProcessTimeOut(processInstance) ){
alertManager.sendProcessTimeoutAlert(processInstance,
processService.findProcessDefineById(processInstance.getProcessDefinitionId()));
sendTimeWarning = true;
}
for(Map.Entry<MasterBaseTaskExecThread,Future<Boolean>> entry: activeTaskNode.entrySet()) {
Future<Boolean> future = entry.getValue();
TaskInstance task = entry.getKey().getTaskInstance();
if(!future.isDone()){
continue;
}
// node monitor thread complete
task = this.processService.findTaskInstanceById(task.getId());
if(task == null){
this.taskFailedSubmit = true;
activeTaskNode.remove(entry.getKey());
continue;
}
// node monitor thread complete
if(task.getState().typeIsFinished()){
activeTaskNode.remove(entry.getKey());
}
logger.info("task :{}, id:{} complete, state is {} ",
task.getName(), task.getId(), task.getState());
// node success , post node submit
if(task.getState() == ExecutionStatus.SUCCESS){
completeTaskList.put(task.getName(), task);
submitPostNode(task.getName());
continue;
}
// node fails, retry first, and then execute the failure process
if(task.getState().typeIsFailure()){
if(task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE){
this.recoverToleranceFaultTaskList.add(task);
}
if(task.taskCanRetry()){
addTaskToStandByList(task);
}else{
completeTaskList.put(task.getName(), task);
if( task.isConditionsTask()
|| DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
submitPostNode(task.getName());
}else{
errorTaskList.put(task.getName(), task);
if(processInstance.getFailureStrategy() == FailureStrategy.END){
killTheOtherTasks();
}
}
}
continue;
}
// other status stop/pause
completeTaskList.put(task.getName(), task);
}
// send alert
if(CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)){
alertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList);
this.recoverToleranceFaultTaskList.clear();
}
// updateProcessInstance completed task status
// failure priority is higher than pause
            // if a task fails, other paused tasks need to be reset to kill
if(errorTaskList.size() > 0){
for(Map.Entry<String, TaskInstance> entry: completeTaskList.entrySet()) {
TaskInstance completeTask = entry.getValue();
if(completeTask.getState()== ExecutionStatus.PAUSE){
completeTask.setState(ExecutionStatus.KILL);
completeTaskList.put(entry.getKey(), completeTask);
processService.updateTaskInstance(completeTask);
}
}
}
if(canSubmitTaskToQueue()){
submitStandByTask();
}
try {
Thread.sleep(Constants.SLEEP_TIME_MILLIS);
} catch (InterruptedException e) {
logger.error(e.getMessage(),e);
}
updateProcessInstanceState();
}
logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState());
}
/**
     * check whether the process instance has timed out
     * @param processInstance process instance
     * @return true if the running time of the process instance exceeds its configured timeout
*/
private boolean checkProcessTimeOut(ProcessInstance processInstance) {
if(processInstance.getTimeout() == 0 ){
return false;
}
Date now = new Date();
long runningTime = DateUtils.diffMin(now, processInstance.getStartTime());
return runningTime > processInstance.getTimeout();
}
/**
* whether can submit task to queue
* @return boolean
*/
private boolean canSubmitTaskToQueue() {
return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory());
}
/**
     * kill the remaining ongoing tasks
*/
private void killTheOtherTasks() {
logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(),
activeTaskNode.size());
for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) {
MasterBaseTaskExecThread taskExecThread = entry.getKey();
Future<Boolean> future = entry.getValue();
TaskInstance taskInstance = taskExecThread.getTaskInstance();
taskInstance = processService.findTaskInstanceById(taskInstance.getId());
if(taskInstance != null && taskInstance.getState().typeIsFinished()){
continue;
}
if (!future.isDone()) {
// record kill info
logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId());
// kill node
taskExecThread.kill();
}
}
}
/**
* whether the retry interval is timed out
* @param taskInstance task instance
* @return Boolean
*/
private boolean retryTaskIntervalOverTime(TaskInstance taskInstance){
if(taskInstance.getState() != ExecutionStatus.FAILURE){
return true;
}
if(taskInstance.getId() == 0 ||
taskInstance.getMaxRetryTimes() ==0 ||
taskInstance.getRetryInterval() == 0 ){
return true;
}
Date now = new Date();
long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime());
        // return true only when the time since the last failure exceeds the configured retry interval
return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval;
}
/**
* handling the list of tasks to be submitted
*/
private void submitStandByTask(){
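        // submit every standby task whose dependencies succeeded (and whose retry interval has elapsed); tasks with failed dependencies are recorded and removed from the standby list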
for(Map.Entry<String, TaskInstance> entry: readyToSubmitTaskList.entrySet()) {
TaskInstance task = entry.getValue();
DependResult dependResult = getDependResultForTask(task);
if(DependResult.SUCCESS == dependResult){
if(retryTaskIntervalOverTime(task)){
submitTaskExec(task);
removeTaskFromStandbyList(task);
}
}else if(DependResult.FAILED == dependResult){
// if the dependency fails, the current node is not submitted and the state changes to failure.
dependFailedTask.put(entry.getKey(), task);
removeTaskFromStandbyList(task);
logger.info("task {},id:{} depend result : {}",task.getName(), task.getId(), dependResult);
}
}
}
/**
* get recovery task instance
* @param taskId task id
* @return recovery task instance
*/
private TaskInstance getRecoveryTaskInstance(String taskId){
if(!StringUtils.isNotEmpty(taskId)){
return null;
}
try {
Integer intId = Integer.valueOf(taskId);
TaskInstance task = processService.findTaskInstanceById(intId);
if(task == null){
logger.error("start node id cannot be found: {}", taskId);
}else {
return task;
}
}catch (Exception e){
logger.error("get recovery task instance failed ",e);
}
return null;
}
/**
* get start task instance list
* @param cmdParam command param
* @return task instance list
*/
private List<TaskInstance> getStartTaskInstanceList(String cmdParam){
List<TaskInstance> instanceList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap != null && paramMap.containsKey(CMDPARAM_RECOVERY_START_NODE_STRING)){
String[] idList = paramMap.get(CMDPARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA);
for(String nodeId : idList){
TaskInstance task = getRecoveryTaskInstance(nodeId);
if(task != null){
instanceList.add(task);
}
}
}
return instanceList;
}
/**
* parse "StartNodeNameList" from cmd param
* @param cmdParam command param
* @return start node name list
*/
private List<String> parseStartNodeName(String cmdParam){
List<String> startNodeNameList = new ArrayList<>();
Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
if(paramMap == null){
return startNodeNameList;
}
if(paramMap.containsKey(CMDPARAM_START_NODE_NAMES)){
startNodeNameList = Arrays.asList(paramMap.get(CMDPARAM_START_NODE_NAMES).split(Constants.COMMA));
}
return startNodeNameList;
}
/**
     * generate the recovery node name list from the recovered task instances
     * that were parsed out of the command param
* @return recovery node name list
*/
private List<String> getRecoveryNodeNameList(){
List<String> recoveryNodeNameList = new ArrayList<>();
if(CollectionUtils.isNotEmpty(recoverNodeIdList)) {
for (TaskInstance task : recoverNodeIdList) {
recoveryNodeNameList.add(task.getName());
}
}
return recoveryNodeNameList;
}
/**
* generate flow dag
* @param processDefinitionJson process definition json
* @param startNodeNameList start node name list
* @param recoveryNodeNameList recovery node name list
* @param depNodeType depend node type
* @return ProcessDag process dag
* @throws Exception exception
*/
public ProcessDag generateFlowDag(String processDefinitionJson,
List<String> startNodeNameList,
List<String> recoveryNodeNameList,
TaskDependType depNodeType)throws Exception{
return DagHelper.generateFlowDag(processDefinitionJson, startNodeNameList, recoveryNodeNameList, depNodeType);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a specific worker group;
the master is blocked when the worker group does not exist.
In this case, other tasks will not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/config/MasterConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.config;
import org.apache.dolphinscheduler.common.Constants;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.PropertySource;
import org.springframework.stereotype.Component;
@Component
@PropertySource(value = "master.properties")
public class MasterConfig {
@Value("${master.exec.threads:100}")
private int masterExecThreads;
@Value("${master.exec.task.num:20}")
private int masterExecTaskNum;
@Value("${master.heartbeat.interval:10}")
private int masterHeartbeatInterval;
@Value("${master.task.commit.retryTimes:5}")
private int masterTaskCommitRetryTimes;
@Value("${master.task.commit.interval:1000}")
private int masterTaskCommitInterval;
@Value("${master.max.cpuload.avg:-1}")
private double masterMaxCpuloadAvg;
@Value("${master.reserved.memory:0.3}")
private double masterReservedMemory;
@Value("${master.host.selector:lowerWeight}")
private String hostSelector;
@Value("${master.listen.port:5678}")
private int listenPort;
public int getListenPort() {
return listenPort;
}
public void setListenPort(int listenPort) {
this.listenPort = listenPort;
}
public String getHostSelector() {
return hostSelector;
}
public void setHostSelector(String hostSelector) {
this.hostSelector = hostSelector;
}
public int getMasterExecThreads() {
return masterExecThreads;
}
public void setMasterExecThreads(int masterExecThreads) {
this.masterExecThreads = masterExecThreads;
}
public int getMasterExecTaskNum() {
return masterExecTaskNum;
}
public void setMasterExecTaskNum(int masterExecTaskNum) {
this.masterExecTaskNum = masterExecTaskNum;
}
public int getMasterHeartbeatInterval() {
return masterHeartbeatInterval;
}
public void setMasterHeartbeatInterval(int masterHeartbeatInterval) {
this.masterHeartbeatInterval = masterHeartbeatInterval;
}
public int getMasterTaskCommitRetryTimes() {
return masterTaskCommitRetryTimes;
}
public void setMasterTaskCommitRetryTimes(int masterTaskCommitRetryTimes) {
this.masterTaskCommitRetryTimes = masterTaskCommitRetryTimes;
}
public int getMasterTaskCommitInterval() {
return masterTaskCommitInterval;
}
public void setMasterTaskCommitInterval(int masterTaskCommitInterval) {
this.masterTaskCommitInterval = masterTaskCommitInterval;
}
public double getMasterMaxCpuloadAvg() {
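        // -1 means no explicit limit was configured; fall back to the default (the number of CPU cores * 2)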
if (masterMaxCpuloadAvg == -1){
return Constants.DEFAULT_MASTER_CPU_LOAD;
}
return masterMaxCpuloadAvg;
}
public void setMasterMaxCpuloadAvg(double masterMaxCpuloadAvg) {
this.masterMaxCpuloadAvg = masterMaxCpuloadAvg;
}
public double getMasterReservedMemory() {
return masterReservedMemory;
}
public void setMasterReservedMemory(double masterReservedMemory) {
this.masterReservedMemory = masterReservedMemory;
}
} |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a specific worker group;
the master is blocked when the worker group does not exist.
In this case, other tasks will not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.consumer;
import com.alibaba.fastjson.JSONObject;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UdfType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.datax.DataxParameters;
import org.apache.dolphinscheduler.common.task.procedure.ProcedureParameters;
import org.apache.dolphinscheduler.common.task.sql.SqlParameters;
import org.apache.dolphinscheduler.common.task.sqoop.SqoopParameters;
import org.apache.dolphinscheduler.common.task.sqoop.sources.SourceMysqlParameter;
import org.apache.dolphinscheduler.common.task.sqoop.targets.TargetMysqlParameter;
import org.apache.dolphinscheduler.common.thread.Stopper;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.server.builder.TaskExecutionContextBuilder;
import org.apache.dolphinscheduler.server.entity.*;
import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.context.ExecutionContext;
import org.apache.dolphinscheduler.server.master.dispatch.enums.ExecutorType;
import org.apache.dolphinscheduler.server.master.dispatch.exceptions.ExecuteException;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.dolphinscheduler.common.Constants.SLEEP_TIME_MILLIS;
/**
* TaskUpdateQueue consumer
*/
@Component
public class TaskPriorityQueueConsumer extends Thread{
/**
     * logger of TaskPriorityQueueConsumer
*/
private static final Logger logger = LoggerFactory.getLogger(TaskPriorityQueueConsumer.class);
/**
* taskUpdateQueue
*/
@Autowired
private TaskPriorityQueue taskPriorityQueue;
/**
* processService
*/
@Autowired
private ProcessService processService;
/**
* executor dispatcher
*/
@Autowired
private ExecutorDispatcher dispatcher;
@PostConstruct
public void init(){
super.setName("TaskUpdateQueueConsumerThread");
super.start();
}
@Override
public void run() {
while (Stopper.isRunning()){
try {
// if not task , blocking here
String taskPriorityInfo = taskPriorityQueue.take();
TaskPriority taskPriority = TaskPriority.of(taskPriorityInfo);
dispatch(taskPriority.getTaskId());
}catch (Exception e){
logger.error("dispatcher task error",e);
}
}
}
/**
* dispatch task
*
* @param taskInstanceId taskInstanceId
* @return result
*/
private Boolean dispatch(int taskInstanceId){
TaskExecutionContext context = getTaskExecutionContext(taskInstanceId);
ExecutionContext executionContext = new ExecutionContext(context.toCommand(), ExecutorType.WORKER, context.getWorkerGroup());
Boolean result = false;
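        // retry the dispatch until it succeeds or the task instance reaches a final state; a dispatch that can never succeed (for example, no worker registered for the requested worker group) keeps this loop spinning, which is presumably how a task bound to a missing worker group can block other tasks, as described in the issue above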
while (Stopper.isRunning()){
try {
result = dispatcher.dispatch(executionContext);
} catch (ExecuteException e) {
logger.error("dispatch error",e);
ThreadUtils.sleep(SLEEP_TIME_MILLIS);
}
if (result || taskInstanceIsFinalState(taskInstanceId)){
break;
}
}
return result;
}
/**
     * whether the task instance is in a final state
     * success, failure, kill, stop, pause and waiting-thread are final states
* @param taskInstanceId taskInstanceId
* @return taskInstance is final state
*/
public Boolean taskInstanceIsFinalState(int taskInstanceId){
TaskInstance taskInstance = processService.findTaskInstanceById(taskInstanceId);
return taskInstance.getState().typeIsFinished();
}
/**
* get TaskExecutionContext
* @param taskInstanceId taskInstanceId
* @return TaskExecutionContext
*/
protected TaskExecutionContext getTaskExecutionContext(int taskInstanceId){
TaskInstance taskInstance = processService.getTaskInstanceDetailByTaskId(taskInstanceId);
// task type
TaskType taskType = TaskType.valueOf(taskInstance.getTaskType());
// task node
TaskNode taskNode = JSONObject.parseObject(taskInstance.getTaskJson(), TaskNode.class);
Integer userId = taskInstance.getProcessDefine() == null ? 0 : taskInstance.getProcessDefine().getUserId();
Tenant tenant = processService.getTenantForProcess(taskInstance.getProcessInstance().getTenantId(), userId);
// verify tenant is null
if (verifyTenantIsNull(tenant, taskInstance)) {
processService.changeTaskState(ExecutionStatus.FAILURE,
taskInstance.getStartTime(),
taskInstance.getHost(),
null,
null,
taskInstance.getId());
return null;
}
// set queue for process instance, user-specified queue takes precedence over tenant queue
String userQueue = processService.queryUserQueueByProcessInstanceId(taskInstance.getProcessInstanceId());
taskInstance.getProcessInstance().setQueue(StringUtils.isEmpty(userQueue) ? tenant.getQueue() : userQueue);
taskInstance.getProcessInstance().setTenantCode(tenant.getTenantCode());
taskInstance.setExecutePath(getExecLocalPath(taskInstance));
taskInstance.setResources(getResourceFullNames(taskNode));
SQLTaskExecutionContext sqlTaskExecutionContext = new SQLTaskExecutionContext();
DataxTaskExecutionContext dataxTaskExecutionContext = new DataxTaskExecutionContext();
ProcedureTaskExecutionContext procedureTaskExecutionContext = new ProcedureTaskExecutionContext();
SqoopTaskExecutionContext sqoopTaskExecutionContext = new SqoopTaskExecutionContext();
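        // enrich the execution context with type-specific information (datasource connection params, UDF functions) for the task types that need it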
// SQL task
if (taskType == TaskType.SQL){
setSQLTaskRelation(sqlTaskExecutionContext, taskNode);
}
// DATAX task
if (taskType == TaskType.DATAX){
setDataxTaskRelation(dataxTaskExecutionContext, taskNode);
}
// procedure task
if (taskType == TaskType.PROCEDURE){
setProcedureTaskRelation(procedureTaskExecutionContext, taskNode);
}
if (taskType == TaskType.SQOOP){
setSqoopTaskRelation(sqoopTaskExecutionContext,taskNode);
}
return TaskExecutionContextBuilder.get()
.buildTaskInstanceRelatedInfo(taskInstance)
.buildProcessInstanceRelatedInfo(taskInstance.getProcessInstance())
.buildProcessDefinitionRelatedInfo(taskInstance.getProcessDefine())
.buildSQLTaskRelatedInfo(sqlTaskExecutionContext)
.buildDataxTaskRelatedInfo(dataxTaskExecutionContext)
.buildProcedureTaskRelatedInfo(procedureTaskExecutionContext)
.buildSqoopTaskRelatedInfo(sqoopTaskExecutionContext)
.create();
}
/**
* set procedure task relation
* @param procedureTaskExecutionContext procedureTaskExecutionContext
* @param taskNode taskNode
*/
private void setProcedureTaskRelation(ProcedureTaskExecutionContext procedureTaskExecutionContext, TaskNode taskNode) {
ProcedureParameters procedureParameters = JSONObject.parseObject(taskNode.getParams(), ProcedureParameters.class);
int datasourceId = procedureParameters.getDatasource();
DataSource datasource = processService.findDataSourceById(datasourceId);
procedureTaskExecutionContext.setConnectionParams(datasource.getConnectionParams());
}
/**
* set datax task relation
* @param dataxTaskExecutionContext dataxTaskExecutionContext
* @param taskNode taskNode
*/
private void setDataxTaskRelation(DataxTaskExecutionContext dataxTaskExecutionContext, TaskNode taskNode) {
DataxParameters dataxParameters = JSONObject.parseObject(taskNode.getParams(), DataxParameters.class);
DataSource dataSource = processService.findDataSourceById(dataxParameters.getDataSource());
DataSource dataTarget = processService.findDataSourceById(dataxParameters.getDataTarget());
if (dataSource != null){
dataxTaskExecutionContext.setDataSourceId(dataxParameters.getDataSource());
dataxTaskExecutionContext.setSourcetype(dataSource.getType().getCode());
dataxTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams());
}
if (dataTarget != null){
dataxTaskExecutionContext.setDataTargetId(dataxParameters.getDataTarget());
dataxTaskExecutionContext.setTargetType(dataTarget.getType().getCode());
dataxTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams());
}
}
/**
     * set sqoop task relation
* @param sqoopTaskExecutionContext sqoopTaskExecutionContext
* @param taskNode taskNode
*/
private void setSqoopTaskRelation(SqoopTaskExecutionContext sqoopTaskExecutionContext, TaskNode taskNode) {
SqoopParameters sqoopParameters = JSONObject.parseObject(taskNode.getParams(), SqoopParameters.class);
SourceMysqlParameter sourceMysqlParameter = JSONUtils.parseObject(sqoopParameters.getSourceParams(), SourceMysqlParameter.class);
TargetMysqlParameter targetMysqlParameter = JSONUtils.parseObject(sqoopParameters.getTargetParams(), TargetMysqlParameter.class);
DataSource dataSource = processService.findDataSourceById(sourceMysqlParameter.getSrcDatasource());
DataSource dataTarget = processService.findDataSourceById(targetMysqlParameter.getTargetDatasource());
if (dataSource != null){
sqoopTaskExecutionContext.setDataSourceId(dataSource.getId());
sqoopTaskExecutionContext.setSourcetype(dataSource.getType().getCode());
sqoopTaskExecutionContext.setSourceConnectionParams(dataSource.getConnectionParams());
}
if (dataTarget != null){
sqoopTaskExecutionContext.setDataTargetId(dataTarget.getId());
sqoopTaskExecutionContext.setTargetType(dataTarget.getType().getCode());
sqoopTaskExecutionContext.setTargetConnectionParams(dataTarget.getConnectionParams());
}
}
/**
* set SQL task relation
* @param sqlTaskExecutionContext sqlTaskExecutionContext
* @param taskNode taskNode
*/
private void setSQLTaskRelation(SQLTaskExecutionContext sqlTaskExecutionContext, TaskNode taskNode) {
SqlParameters sqlParameters = JSONObject.parseObject(taskNode.getParams(), SqlParameters.class);
int datasourceId = sqlParameters.getDatasource();
DataSource datasource = processService.findDataSourceById(datasourceId);
sqlTaskExecutionContext.setConnectionParams(datasource.getConnectionParams());
// whether udf type
boolean udfTypeFlag = EnumUtils.isValidEnum(UdfType.class, sqlParameters.getType())
&& StringUtils.isNotEmpty(sqlParameters.getUdfs());
if (udfTypeFlag){
String[] udfFunIds = sqlParameters.getUdfs().split(",");
int[] udfFunIdsArray = new int[udfFunIds.length];
for(int i = 0 ; i < udfFunIds.length;i++){
udfFunIdsArray[i]=Integer.parseInt(udfFunIds[i]);
}
List<UdfFunc> udfFuncList = processService.queryUdfFunListByids(udfFunIdsArray);
sqlTaskExecutionContext.setUdfFuncList(udfFuncList);
}
}
/**
* get execute local path
*
* @return execute local path
*/
private String getExecLocalPath(TaskInstance taskInstance){
return FileUtils.getProcessExecDir(taskInstance.getProcessDefine().getProjectId(),
taskInstance.getProcessDefine().getId(),
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
}
/**
     * whether the tenant is null
* @param tenant tenant
* @param taskInstance taskInstance
* @return result
*/
private boolean verifyTenantIsNull(Tenant tenant, TaskInstance taskInstance) {
if(tenant == null){
logger.error("tenant not exists,process instance id : {},task instance id : {}",
taskInstance.getProcessInstance().getId(),
taskInstance.getId());
return true;
}
return false;
}
/**
* get resource full name list
*/
private List<String> getResourceFullNames(TaskNode taskNode) {
List<String> resourceFullNameList = new ArrayList<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
List<ResourceInfo> projectResourceFiles = baseParam.getResourceFilesList();
if (projectResourceFiles != null) {
// filter the resources that the resource id equals 0
Set<ResourceInfo> oldVersionResources = projectResourceFiles.stream().filter(t -> t.getId() == 0).collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(oldVersionResources)) {
resourceFullNameList.addAll(oldVersionResources.stream().map(resource -> resource.getRes()).collect(Collectors.toSet()));
}
// get the resource id in order to get the resource names in batch
Stream<Integer> resourceIdStream = projectResourceFiles.stream().map(resourceInfo -> resourceInfo.getId());
Set<Integer> resourceIdsSet = resourceIdStream.collect(Collectors.toSet());
if (CollectionUtils.isNotEmpty(resourceIdsSet)) {
Integer[] resourceIds = resourceIdsSet.toArray(new Integer[resourceIdsSet.size()]);
List<Resource> resources = processService.listResourceByIds(resourceIds);
resourceFullNameList.addAll(resources.stream()
.map(resourceInfo -> resourceInfo.getFullName())
.collect(Collectors.toList()));
}
}
}
return resourceFullNameList;
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a specific worker group;
the master is blocked when the worker group does not exist.
In this case, other tasks will not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/main/resources/master.properties | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# master execute thread num
#master.exec.threads=100
# master execute task number in parallel
#master.exec.task.num=20
# master heartbeat interval
#master.heartbeat.interval=10
# master commit task retry times
#master.task.commit.retryTimes=5
# master commit task interval
#master.task.commit.interval=1000
# only less than cpu avg load, master server can work. default value -1 : the number of cpu cores * 2
#master.max.cpuload.avg=-1
# only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G.
#master.reserved.memory=0.3
# master listen port
#master.listen.port=5678 |
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a specific worker group;
the master is blocked when the worker group does not exist.
In this case, other tasks will not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/consumer/TaskPriorityQueueConsumerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.consumer;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.registry.DependencyConfig;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.zk.SpringZKServer;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.util.Date;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class,
NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, TaskPriorityQueueConsumer.class,
ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class})
public class TaskPriorityQueueConsumerTest {
@Autowired
private TaskPriorityQueue taskPriorityQueue;
@Autowired
private TaskPriorityQueueConsumer taskPriorityQueueConsumer;
@Autowired
private ProcessService processService;
@Autowired
private ExecutorDispatcher dispatcher;
@Before
public void init(){
Tenant tenant = new Tenant();
tenant.setId(1);
tenant.setTenantCode("journey");
tenant.setTenantName("journey");
tenant.setDescription("journey");
tenant.setQueueId(1);
tenant.setCreateTime(new Date());
tenant.setUpdateTime(new Date());
Mockito.when(processService.getTenantForProcess(1,2)).thenReturn(tenant);
Mockito.when(processService.queryUserQueueByProcessInstanceId(1)).thenReturn("default");
}
@Test
public void testSHELLTask() throws Exception {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(1);
taskInstance.setTaskType("SHELL");
taskInstance.setProcessDefinitionId(1);
taskInstance.setProcessInstanceId(1);
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo \\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}");
taskInstance.setProcessInstancePriority(Priority.MEDIUM);
taskInstance.setWorkerGroup("default");
taskInstance.setExecutorId(2);
ProcessInstance processInstance = new ProcessInstance();
processInstance.setTenantId(1);
processInstance.setCommandType(CommandType.START_PROCESS);
taskInstance.setProcessInstance(processInstance);
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setUserId(2);
processDefinition.setProjectId(1);
taskInstance.setProcessDefine(processDefinition);
Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance);
taskPriorityQueue.put("2_1_2_1_default");
Thread.sleep(10000);
}
@Test
public void testSQLTask() throws Exception {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(1);
taskInstance.setTaskType("SQL");
taskInstance.setProcessDefinitionId(1);
taskInstance.setProcessInstanceId(1);
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setTaskJson("{\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-3655\",\"maxRetryTimes\":0,\"name\":\"UDF测试\",\"params\":\"{\\\"postStatements\\\":[],\\\"connParams\\\":\\\"\\\",\\\"receiversCc\\\":\\\"\\\",\\\"udfs\\\":\\\"1\\\",\\\"type\\\":\\\"HIVE\\\",\\\"title\\\":\\\"test\\\",\\\"sql\\\":\\\"select id,name,ds,zodia(ds) from t_journey_user\\\",\\\"preStatements\\\":[],\\\"sqlType\\\":0,\\\"receivers\\\":\\\"[email protected]\\\",\\\"datasource\\\":3,\\\"showType\\\":\\\"TABLE\\\",\\\"localParams\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQL\"}");
taskInstance.setProcessInstancePriority(Priority.MEDIUM);
taskInstance.setWorkerGroup("default");
taskInstance.setExecutorId(2);
ProcessInstance processInstance = new ProcessInstance();
processInstance.setTenantId(1);
processInstance.setCommandType(CommandType.START_PROCESS);
taskInstance.setProcessInstance(processInstance);
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setUserId(2);
processDefinition.setProjectId(1);
taskInstance.setProcessDefine(processDefinition);
Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance);
taskPriorityQueue.put("2_1_2_1_default");
DataSource dataSource = new DataSource();
dataSource.setId(1);
dataSource.setName("sqlDatasource");
dataSource.setType(DbType.MYSQL);
dataSource.setUserId(2);
dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}");
dataSource.setCreateTime(new Date());
dataSource.setUpdateTime(new Date());
Mockito.when(processService.findDataSourceById(1)).thenReturn(dataSource);
Thread.sleep(10000);
}
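// same flow for a DATAX task; data source id 80 is mocked for resolving both the source and target connections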
@Test
public void testDataxTask() throws Exception {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(1);
taskInstance.setTaskType("DATAX");
taskInstance.setProcessDefinitionId(1);
taskInstance.setProcessInstanceId(1);
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-97625\",\"maxRetryTimes\":0,\"name\":\"MySQL数据相互导入\",\"params\":\"{\\\"targetTable\\\":\\\"pv2\\\",\\\"postStatements\\\":[],\\\"jobSpeedRecord\\\":1000,\\\"customConfig\\\":0,\\\"dtType\\\":\\\"MYSQL\\\",\\\"dsType\\\":\\\"MYSQL\\\",\\\"jobSpeedByte\\\":0,\\\"dataSource\\\":80,\\\"dataTarget\\\":80,\\\"sql\\\":\\\"SELECT dt,count FROM pv\\\",\\\"preStatements\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"DATAX\",\"workerGroup\":\"default\"}");
taskInstance.setProcessInstancePriority(Priority.MEDIUM);
taskInstance.setWorkerGroup("default");
taskInstance.setExecutorId(2);
ProcessInstance processInstance = new ProcessInstance();
processInstance.setTenantId(1);
processInstance.setCommandType(CommandType.START_PROCESS);
taskInstance.setProcessInstance(processInstance);
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setUserId(2);
processDefinition.setProjectId(1);
taskInstance.setProcessDefine(processDefinition);
Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance);
taskPriorityQueue.put("2_1_2_1_default");
DataSource dataSource = new DataSource();
dataSource.setId(80);
dataSource.setName("datax");
dataSource.setType(DbType.MYSQL);
dataSource.setUserId(2);
dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}");
dataSource.setCreateTime(new Date());
dataSource.setUpdateTime(new Date());
Mockito.when(processService.findDataSourceById(80)).thenReturn(dataSource);
Thread.sleep(10000);
}
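// same flow for a SQOOP import task (MySQL source, HDFS target); the mocked data source backs the MySQL side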
@Test
public void testSqoopTask() throws Exception {
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(1);
taskInstance.setTaskType("SQOOP");
taskInstance.setProcessDefinitionId(1);
taskInstance.setProcessInstanceId(1);
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-63634\",\"maxRetryTimes\":0,\"name\":\"MySQL数据导入HDSF\",\"params\":\"{\\\"sourceType\\\":\\\"MYSQL\\\",\\\"targetType\\\":\\\"HDFS\\\",\\\"targetParams\\\":\\\"{\\\\\\\"targetPath\\\\\\\":\\\\\\\"/test/datatest\\\\\\\",\\\\\\\"deleteTargetDir\\\\\\\":true,\\\\\\\"fileType\\\\\\\":\\\\\\\"--as-textfile\\\\\\\",\\\\\\\"compressionCodec\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"fieldsTerminated\\\\\\\":\\\\\\\",\\\\\\\",\\\\\\\"linesTerminated\\\\\\\":\\\\\\\"\\\\\\\\\\\\\\\\n\\\\\\\"}\\\",\\\"modelType\\\":\\\"import\\\",\\\"sourceParams\\\":\\\"{\\\\\\\"srcType\\\\\\\":\\\\\\\"MYSQL\\\\\\\",\\\\\\\"srcDatasource\\\\\\\":1,\\\\\\\"srcTable\\\\\\\":\\\\\\\"t_ds_user\\\\\\\",\\\\\\\"srcQueryType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcQuerySql\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcColumnType\\\\\\\":\\\\\\\"0\\\\\\\",\\\\\\\"srcColumns\\\\\\\":\\\\\\\"\\\\\\\",\\\\\\\"srcConditionList\\\\\\\":[],\\\\\\\"mapColumnHive\\\\\\\":[],\\\\\\\"mapColumnJava\\\\\\\":[]}\\\",\\\"localParams\\\":[],\\\"concurrency\\\":1}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SQOOP\",\"workerGroup\":\"default\"}");
taskInstance.setProcessInstancePriority(Priority.MEDIUM);
taskInstance.setWorkerGroup("default");
taskInstance.setExecutorId(2);
ProcessInstance processInstance = new ProcessInstance();
processInstance.setTenantId(1);
processInstance.setCommandType(CommandType.START_PROCESS);
taskInstance.setProcessInstance(processInstance);
ProcessDefinition processDefinition = new ProcessDefinition();
processDefinition.setUserId(2);
processDefinition.setProjectId(1);
taskInstance.setProcessDefine(processDefinition);
Mockito.when(processService.getTaskInstanceDetailByTaskId(1)).thenReturn(taskInstance);
taskPriorityQueue.put("2_1_2_1_default");
DataSource dataSource = new DataSource();
dataSource.setId(1);
dataSource.setName("datax");
dataSource.setType(DbType.MYSQL);
dataSource.setUserId(2);
dataSource.setConnectionParams("{\"address\":\"jdbc:mysql://192.168.221.185:3306\",\"database\":\"dolphinscheduler_qiaozhanwei\",\"jdbcUrl\":\"jdbc:mysql://192.168.221.185:3306/dolphinscheduler_qiaozhanwei\",\"user\":\"root\",\"password\":\"root@123\"}");
dataSource.setCreateTime(new Date());
dataSource.setUpdateTime(new Date());
Mockito.when(processService.findDataSourceById(1)).thenReturn(dataSource);
Thread.sleep(10000);
}
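// KILL is a terminal execution status, so the consumer should report this task instance as being in a final state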
@Test
public void testTaskInstanceIsFinalState(){
TaskInstance taskInstance = new TaskInstance();
taskInstance.setId(1);
taskInstance.setTaskType("SHELL");
taskInstance.setProcessDefinitionId(1);
taskInstance.setProcessInstanceId(1);
taskInstance.setState(ExecutionStatus.KILL);
taskInstance.setTaskJson("{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\",\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false,\"id\":\"tasks-55201\",\"maxRetryTimes\":0,\"name\":\"测试任务\",\"params\":\"{\\\"rawScript\\\":\\\"echo \\\\\\\"测试任务\\\\\\\"\\\",\\\"localParams\\\":[],\\\"resourceList\\\":[]}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\",\"taskInstancePriority\":\"MEDIUM\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0},\"timeout\":\"{\\\"enable\\\":false,\\\"strategy\\\":\\\"\\\"}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}");
taskInstance.setProcessInstancePriority(Priority.MEDIUM);
taskInstance.setWorkerGroup("default");
taskInstance.setExecutorId(2);
Mockito.when(processService.findTaskInstanceById(1)).thenReturn(taskInstance);
taskPriorityQueueConsumer.taskInstanceIsFinalState(1);
}
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a special worker group;
the master would be blocked when the worker group does not exist.
In this case, other tasks would not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/master/runner/MasterTaskExecThreadTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.master.runner;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.consumer.TaskPriorityQueueConsumer;
import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.registry.DependencyConfig;
import org.apache.dolphinscheduler.server.registry.ZookeeperNodeManager;
import org.apache.dolphinscheduler.server.registry.ZookeeperRegistryCenter;
import org.apache.dolphinscheduler.server.zk.SpringZKServer;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl;
import org.apache.dolphinscheduler.service.zk.ZookeeperCachedOperator;
import org.apache.dolphinscheduler.service.zk.ZookeeperConfig;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.util.HashSet;
import java.util.Set;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes={DependencyConfig.class, SpringApplicationContext.class, SpringZKServer.class,
NettyExecutorManager.class, ExecutorDispatcher.class, ZookeeperRegistryCenter.class, TaskPriorityQueueConsumer.class,
ZookeeperNodeManager.class, ZookeeperCachedOperator.class, ZookeeperConfig.class, MasterConfig.class})
public class MasterTaskExecThreadTest {
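// no worker groups registered in zookeeper at all, so the requested group cannot be considered valid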
@Test
public void testExistsValidWorkerGroup1(){
ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class);
Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(null);
MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null);
masterTaskExecThread.existsValidWorkerGroup("default");
}
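// worker groups exist, but none of them is "default", so the requested group is not valid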
@Test
public void testExistsValidWorkerGroup2(){
ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class);
Set<String> workerGroups = new HashSet<>();
workerGroups.add("test1");
workerGroups.add("test2");
Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(workerGroups);
MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null);
masterTaskExecThread.existsValidWorkerGroup("default");
}
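// "test1" is registered and reports live worker nodes, so it should be considered a valid worker group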
@Test
public void testExistsValidWorkerGroup3(){
ZookeeperRegistryCenter zookeeperRegistryCenter = Mockito.mock(ZookeeperRegistryCenter.class);
Set<String> workerGroups = new HashSet<>();
workerGroups.add("test1");
Mockito.when(zookeeperRegistryCenter.getWorkerGroupDirectly()).thenReturn(workerGroups);
Mockito.when(zookeeperRegistryCenter.getWorkerGroupNodesDirectly("test1")).thenReturn(workerGroups);
MasterTaskExecThread masterTaskExecThread = new MasterTaskExecThread(null);
masterTaskExecThread.existsValidWorkerGroup("test1");
}
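// Note (illustrative sketch, not part of the original test): judging from the scenarios above,
// existsValidWorkerGroup(groupName) presumably returns true only when the group is registered in
// zookeeper and still has at least one live worker node, roughly:
//
//     Set<String> groups = zookeeperRegistryCenter.getWorkerGroupDirectly();
//     if (groups == null || !groups.contains(groupName)) {
//         return false;
//     }
//     Set<String> nodes = zookeeperRegistryCenter.getWorkerGroupNodesDirectly(groupName);
//     return nodes != null && !nodes.isEmpty();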
}
|
closed | apache/dolphinscheduler | https://github.com/apache/dolphinscheduler | 2,762 | [BUG]the master would be blocked when worker group not exists | **Is your feature request related to a problem? Please describe.**
I create a task with a special worker group;
the master would be blocked when the worker group does not exist.
In this case, other tasks would not run anymore.
version:
[dev-1.3.0] | https://github.com/apache/dolphinscheduler/issues/2762 | https://github.com/apache/dolphinscheduler/pull/2764 | 7d2441f63468fb4c9e340f0be0bd6844923a9743 | 7a05c007b45ea742e3ed1a7558c1a641515c140c | "2020-05-20T02:17:55Z" | java | "2020-05-21T02:34:14Z" | dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/registry/DependencyConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dolphinscheduler.server.registry;
import org.apache.dolphinscheduler.dao.AlertDao;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.server.master.cache.impl.TaskInstanceCacheManagerImpl;
import org.apache.dolphinscheduler.server.master.dispatch.ExecutorDispatcher;
import org.apache.dolphinscheduler.server.master.dispatch.host.HostManager;
import org.apache.dolphinscheduler.server.master.dispatch.host.RandomHostManager;
import org.apache.dolphinscheduler.server.master.processor.queue.TaskResponseService;
import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueue;
import org.apache.dolphinscheduler.service.queue.TaskPriorityQueueImpl;
import org.mockito.Mockito;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
* dependency config
*/
@Configuration
public class DependencyConfig {
@Bean
public AlertDao alertDao() {
return new AlertDao();
}
@Bean
public AlertMapper alertMapper() {
return Mockito.mock(AlertMapper.class);
}
@Bean
public UserAlertGroupMapper userAlertGroupMapper() {
return Mockito.mock(UserAlertGroupMapper.class);
}
@Bean
public TaskInstanceCacheManagerImpl taskInstanceCacheManagerImpl(){
return Mockito.mock(TaskInstanceCacheManagerImpl.class);
}
@Bean
public ProcessService processService(){
return Mockito.mock(ProcessService.class);
}
@Bean
public UserMapper userMapper(){
return Mockito.mock(UserMapper.class);
}
@Bean
public ProcessDefinitionMapper processDefineMapper(){
return Mockito.mock(ProcessDefinitionMapper.class);
}
@Bean
public ProcessInstanceMapper processInstanceMapper(){
return Mockito.mock(ProcessInstanceMapper.class);
}
@Bean
public DataSourceMapper dataSourceMapper(){
return Mockito.mock(DataSourceMapper.class);
}
@Bean
public ProcessInstanceMapMapper processInstanceMapMapper(){
return Mockito.mock(ProcessInstanceMapMapper.class);
}
@Bean
public TaskInstanceMapper taskInstanceMapper(){
return Mockito.mock(TaskInstanceMapper.class);
}
@Bean
public CommandMapper commandMapper(){
return Mockito.mock(CommandMapper.class);
}
@Bean
public ScheduleMapper scheduleMapper(){
return Mockito.mock(ScheduleMapper.class);
}
@Bean
public UdfFuncMapper udfFuncMapper(){
return Mockito.mock(UdfFuncMapper.class);
}
@Bean
public ResourceMapper resourceMapper(){
return Mockito.mock(ResourceMapper.class);
}
@Bean
public ErrorCommandMapper errorCommandMapper(){
return Mockito.mock(ErrorCommandMapper.class);
}
@Bean
public TenantMapper tenantMapper(){
return Mockito.mock(TenantMapper.class);
}
@Bean
public ProjectMapper projectMapper(){
return Mockito.mock(ProjectMapper.class);
}
@Bean
public TaskCallbackService taskCallbackService(){
return Mockito.mock(TaskCallbackService.class);
}
@Bean
public HostManager hostManager(){
return new RandomHostManager();
}
@Bean
public TaskResponseService taskResponseService(){
return Mockito.mock(TaskResponseService.class);
}
@Bean
public TaskPriorityQueue taskPriorityQueue(){
return new TaskPriorityQueueImpl();
}
}
|