from llmware.agents import LLMfx

text = ("Tesla stock fell 8% in premarket trading after reporting fourth-quarter revenue and profit that "
        "missed analysts’ estimates. The electric vehicle company also warned that vehicle volume growth in "
        "2024 'may be notably lower' than last year’s growth rate. Automotive revenue, meanwhile, increased "
        "just 1% from a year earlier, partly because the EVs were selling for less than they had in the past. "
        "Tesla implemented steep price cuts in the second half of the year around the world. In a Wednesday "
        "presentation, the company warned investors that it’s 'currently between two major growth waves.'")

# create an agent using LLMfx class
agent = LLMfx()

# load text to process
agent.load_work(text)

# load 'models' as 'tools' to be used in analysis process
agent.load_tool("sentiment")
agent.load_tool("extract")
agent.load_tool("topics")
agent.load_tool("boolean")

# run function calls using different tools
agent.sentiment()
agent.topics()
agent.extract(params=["company"])
agent.extract(params=["automotive revenue growth"])
agent.xsum()
agent.boolean(params=["is 2024 growth expected to be strong? (explain)"])

# at end of processing, show the report that was automatically aggregated by key
report = agent.show_report()

# displays a summary of the activity in the process
activity_summary = agent.activity_summary()

# list of the responses gathered
for i, entries in enumerate(agent.response_list):
    print("update: response analysis: ", i, entries)

output = {"report": report, "activity_summary": activity_summary, "journal": agent.journal}
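The example already gathers the full run into the "output" dict, so a natural next step is to persist it for later review. A minimal sketch, assuming the report, activity summary, and journal entries are JSON-serializable (with a str fallback for anything that is not):

import json

# persist the aggregated analysis built above; default=str guards against
# any non-serializable entries in the journal
with open("agent_analysis.json", "w") as f:
    json.dump(output, f, indent=2, default=str)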
# This example illustrates a simple contract analysis
# using a RAG-optimized LLM running locally

import os
import re

from llmware.prompts import Prompt, HumanInTheLoop
from llmware.setup import Setup
from llmware.configs import LLMWareConfig


def contract_analysis_on_laptop(model_name):

    # In this scenario, we will:
    #   -- download a set of sample contract files
    #   -- create a Prompt and load a BLING LLM model
    #   -- parse each contract, extract the relevant passages, and pass questions to a local LLM

    # Main loop - iterate through each contract:
    #   1. parse the document in memory (convert from PDF file into text chunks with metadata)
    #   2. filter the parsed text chunks with a "topic" (e.g., "governing law") to extract relevant passages
    #   3. package and assemble the text chunks into a model-ready context
    #   4. ask three key questions for each contract to the LLM
    #   5. print the responses to the screen
    #   6. save the results in both json and csv for further processing and review

    # Load the llmware sample files
    print(f"\n > Loading the llmware sample files...")
    sample_files_path = Setup().load_sample_files()
    contracts_path = os.path.join(sample_files_path, "Agreements")

    # Query list - these are the 3 main topics and questions that we would like the LLM to analyze for each contract
    query_list = {"executive employment agreement": "What are the names of the two parties?",
                  "base salary": "What is the executive's base salary?",
                  "vacation": "How many vacation days will the executive receive?"}

    # Load the selected model by name that was passed into the function
    print(f"\n > Loading model {model_name}...")
    prompter = Prompt().load_model(model_name, temperature=0.0, sample=False)

    # Main loop
    for i, contract in enumerate(os.listdir(contracts_path)):

        # exclude the Mac file artifact (annoying, but a fact of life in demos)
        if contract != ".DS_Store":

            print("\nAnalyzing contract: ", str(i + 1), contract)
            print("LLM Responses:")

            for key, value in query_list.items():

                # steps 1 + 2 + 3 above - contract is parsed, text-chunked, filtered by topic key,
                # ... and then packaged into the prompt
                source = prompter.add_source_document(contracts_path, contract, query=key)

                # step 4 above - calling the LLM with 'source' information already packaged into the prompt
                responses = prompter.prompt_with_source(value, prompt_name="default_with_context")

                # step 5 above - print out to screen
                for r, response in enumerate(responses):
                    print(key, ":", re.sub("[\n]", " ", response["llm_response"]).strip())

                # we're done with this contract, clear the source from the prompt
                prompter.clear_source_materials()

    # step 6 above - saving the analysis to jsonl and csv

    # save the prompt state as jsonl in the /prompt_history folder
    print("\nPrompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))
    prompter.save_state()

    # save a csv report that includes the model, response, prompt, and evidence for human-in-the-loop review
    csv_output = HumanInTheLoop(prompter).export_current_interaction_to_csv()
    print("csv output saved at:", csv_output)


if __name__ == "__main__":

    # use a local cpu model - try the newest - a RAG fine-tune of Phi-3, quantized and packaged in GGUF
    model = "bling-phi-3-gguf"
    contract_analysis_on_laptop(model)
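Because the entry point takes the model name as a parameter, the same analysis can be rerun to compare several local models side by side. A short sketch of that usage; the second model name below is a hypothetical placeholder, so substitute any model actually registered in the llmware catalog:

# compare several local models on the same contracts; the second name is a
# hypothetical placeholder - substitute a model available in the llmware catalog
for model_name in ["bling-phi-3-gguf", "another-bling-model-gguf"]:
    contract_analysis_on_laptop(model_name)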
# no-install fast start: sqlite for text collections, chromadb for vector embeddings
from llmware.configs import LLMWareConfig

LLMWareConfig().set_active_db("sqlite")
LLMWareConfig().set_vector_db("chromadb")

# to switch to mongodb + milvus, pull the docker-compose file and start the services
curl -o docker-compose.yaml https://raw.githubusercontent.com/llmware-ai/llmware/main/docker-compose.yaml
docker compose up -d

# then configure llmware to use mongo and milvus
from llmware.configs import LLMWareConfig

LLMWareConfig().set_active_db("mongo")
LLMWareConfig().set_vector_db("milvus")
# postgres (with pgvector) for both text collections and vector embeddings
curl -o docker-compose.yaml https://raw.githubusercontent.com/llmware-ai/llmware/main/docker-compose-pgvector.yaml
docker compose up -d

from llmware.configs import LLMWareConfig

LLMWareConfig().set_active_db("postgres")
LLMWareConfig().set_vector_db("postgres")
# scripts to deploy other options
curl -o docker-compose.yaml https://raw.githubusercontent.com/llmware-ai/llmware/main/docker-compose-redis-stack.yaml
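As with the other compose files, the Redis stack is started with "docker compose up -d" once downloaded. A minimal sketch of the matching Python configuration follows, by analogy with the Mongo/Milvus and Postgres snippets above; the "redis" identifier is an assumption based on llmware's naming pattern, not something confirmed by the text here.

# a sketch, not from the original: point llmware's vector store at the
# redis stack started above ("redis" as the vector db name is an assumption)
from llmware.configs import LLMWareConfig

LLMWareConfig().set_vector_db("redis")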