

Managing Context Effectively with the Model Context Protocol

Apr 28, 2025 at 02:32 pm

In this tutorial, we walk you through a practical implementation of the Model Context Protocol (MCP) by building a ModelContextManager.

```python
import torch
import numpy as np
import typing
from dataclasses import dataclass, field
import time
import gc
from tqdm.notebook import tqdm
from sentence_transformers import SentenceTransformer
from transformers import GPT2Tokenizer, AutoTokenizer, AutoModelForSeq2SeqLM
import math

# Configuration for the context manager.
MAX_TOKENS = 8000
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
NUM_CHUNKS = 50
CHUNK_SIZE = 100
RELEVANCE_THRESHOLD = 0.1
IMPORTANCE_FACTOR = 1.0
RECENCY_FACTOR = 0.5
VISUALIZE_CONTEXT = True
BATCH_SIZE = 32

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two embedding vectors."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

@dataclass
class ContextChunk:
    content: str
    embedding: np.ndarray
    importance: float = 1.0
    # default_factory so each chunk records its own creation time,
    # not the time the class was defined.
    timestamp: float = field(default_factory=time.time)
    metadata: dict = None

    def __post_init__(self):
        if self.metadata is None:
            self.metadata = {}

class ModelContextManager:
    def __init__(self, context_chunks: typing.List[ContextChunk] = None,
                 max_tokens: int = MAX_TOKENS, token_limit: int = 0,
                 gpt2_tokenizer: GPT2Tokenizer = None):
        self.max_tokens = max_tokens
        self.token_limit = token_limit
        self.context_chunks = context_chunks or []
        self.used_tokens = 0
        self.last_chunk_index = 0
        self.total_chunks = 0
        if gpt2_tokenizer is None:
            self.gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        else:
            self.gpt2_tokenizer = gpt2_tokenizer
        self.sentence_transformer = SentenceTransformer('all-mpnet-base-v2')

    def add_chunk(self, chunk_text: str, importance: float = 1.0):
        # Count the chunk's tokens with the GPT-2 tokenizer and embed it
        # for later relevance scoring.
        encoded_input = self.gpt2_tokenizer(chunk_text, return_tensors='pt')
        chunk_token_count = int(encoded_input['input_ids'].shape[1])
        self.used_tokens += chunk_token_count
        chunk_embedding = self.sentence_transformer.encode(chunk_text, batch_size=BATCH_SIZE)
        new_chunk = ContextChunk(content=chunk_text, embedding=chunk_embedding, importance=importance)
        self.context_chunks.append(new_chunk)
        self.last_chunk_index += 1
        self.total_chunks += 1
        print(f"Added chunk with {chunk_token_count} tokens and importance {importance}. "
              f"Total used tokens: {self.used_tokens}, total chunks: {self.total_chunks}")

    def optimize_context_window(self, query: str, min_chunks: int = 3):
        if len(self.context_chunks) <= min_chunks:
            return []
        query_embedding = self.sentence_transformer.encode(query, batch_size=BATCH_SIZE)
        chunks_to_keep = []
        remaining_tokens = self.max_tokens - self.used_tokens
        if remaining_tokens < 0:
            print("Warning: token limit exceeded by %s tokens" % -remaining_tokens)
        # Walk the chunks from newest to oldest.
        for i in range(len(self.context_chunks) - 1, -1, -1):
            chunk = self.context_chunks[i]
            if i == len(self.context_chunks) - 1:
                # Always keep the most recent chunk.
                chunks_to_keep.append(i)
                continue
            chunk_importance = chunk.importance * IMPORTANCE_FACTOR
            chunk_recency = (time.time() - chunk.timestamp) * RECENCY_FACTOR
            relevance_score = cosine_similarity(chunk.embedding, query_embedding)
            total_score = chunk_importance + chunk_recency + relevance_score
            if total_score >= RELEVANCE_THRESHOLD:
                encoded_input = self.gpt2_tokenizer(chunk.content, return_tensors='pt')
                chunk_token_count = int(encoded_input['input_ids'].shape[1])
                if remaining_tokens >= chunk_token_count:
                    chunks_to_keep.append(i)
                    remaining_tokens -= chunk_token_count
```
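The listing breaks off in the source partway through optimize_context_window. To show how the pieces fit together, here is a minimal, hypothetical usage sketch rather than a definitive example: the chunk texts and query are invented for illustration, and it assumes the truncated remainder of optimize_context_window ultimately returns the chunks_to_keep indices it accumulates.

```python
# Hypothetical usage sketch -- not from the original article.
manager = ModelContextManager()

# Invented example chunks; higher importance biases a chunk toward being kept.
manager.add_chunk("The Model Context Protocol manages an LLM's working memory.", importance=2.0)
manager.add_chunk("Chunks are embedded with all-mpnet-base-v2 for relevance scoring.")
manager.add_chunk("Token counts come from the GPT-2 tokenizer.")
manager.add_chunk("Unrelated note: remember to water the plants.")

# Assumes the completed method returns the indices of the chunks it keeps.
kept = manager.optimize_context_window("How does MCP score chunk relevance?")
print("Kept chunk indices:", kept)
```

Under this scoring, a chunk survives pruning when its importance times IMPORTANCE_FACTOR, plus its age times RECENCY_FACTOR, plus its cosine relevance to the query reaches RELEVANCE_THRESHOLD, and only if its token count still fits the remaining budget.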

Disclaimer: info@kdj.com

The information provided does not constitute trading advice. kdj.com assumes no responsibility for any investments made on the basis of the information provided in this article. Cryptocurrencies are highly volatile, and it is strongly recommended that you invest with caution after thorough research!

If you believe that content used on this website infringes your copyright, please contact us immediately (info@kdj.com) and we will remove it promptly.
