
Managing Context Effectively with the Model Context Protocol

Apr 28, 2025 at 02:32 pm

In this tutorial, we guide you through a practical implementation of the Model Context Protocol (MCP) by building a ModelContextManager that stores text as embedded chunks and keeps the context window inside a token budget by scoring each chunk's importance, recency, and relevance to the current query.

```python
import torch
import numpy as np
import typing
from dataclasses import dataclass, field
import time
import gc
import math
from tqdm.notebook import tqdm
from sentence_transformers import SentenceTransformer
from transformers import GPT2Tokenizer, AutoTokenizer, AutoModelForSeq2SeqLM

# Configuration for the context manager.
MAX_TOKENS = 8000                 # hard budget for the context window
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
NUM_CHUNKS = 50
CHUNK_SIZE = 100
RELEVANCE_THRESHOLD = 0.1         # minimum keep-score for a chunk
IMPORTANCE_FACTOR = 1.0           # weight of a chunk's importance
RECENCY_FACTOR = 0.5              # weight of a chunk's recency bonus
VISUALIZE_CONTEXT = True
BATCH_SIZE = 32

@dataclass
class ContextChunk:
    """One piece of context: its text, embedding, and scoring metadata."""
    content: str
    embedding: np.ndarray
    importance: float = 1.0
    timestamp: float = field(default_factory=time.time)
    metadata: dict = None

    def __post_init__(self):
        if self.metadata is None:
            self.metadata = {}

class ModelContextManager:
    def __init__(self, context_chunks: typing.List[ContextChunk] = None,
                 max_tokens: int = MAX_TOKENS, token_limit: int = 0,
                 gpt2_tokenizer: GPT2Tokenizer = None):
        self.max_tokens = max_tokens
        self.token_limit = token_limit
        self.context_chunks = context_chunks or []
        self.used_tokens = 0
        self.last_chunk_index = 0
        self.total_chunks = 0
        if gpt2_tokenizer is None:
            self.gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
        else:
            self.gpt2_tokenizer = gpt2_tokenizer
        self.sentence_transformer = SentenceTransformer('all-mpnet-base-v2')

    def add_chunk(self, chunk_text: str, importance: float = 1.0):
        # Count the chunk's tokens with the GPT-2 tokenizer and update the budget.
        encoded_input = self.gpt2_tokenizer(chunk_text, return_tensors='pt')
        chunk_token_count = int(encoded_input['input_ids'].shape[1])
        self.used_tokens += chunk_token_count
        # Embed the chunk so it can be scored against future queries.
        chunk_embedding = self.sentence_transformer.encode(chunk_text, batch_size=BATCH_SIZE)
        new_chunk = ContextChunk(content=chunk_text, embedding=chunk_embedding,
                                 importance=importance)
        self.context_chunks.append(new_chunk)
        self.last_chunk_index += 1
        self.total_chunks += 1
        print(f"Added chunk with {chunk_token_count} tokens and importance {importance}. "
              f"Total used tokens: {self.used_tokens}, total chunks: {self.total_chunks}")

    def optimize_context_window(self, query: str, min_chunks: int = 3):
        # With only a handful of chunks there is nothing to prune yet.
        if len(self.context_chunks) <= min_chunks:
            return []
        query_embedding = self.sentence_transformer.encode(query, batch_size=BATCH_SIZE)
        chunks_to_keep = []
        remaining_tokens = self.max_tokens - self.used_tokens
        if remaining_tokens < 0:
            print("Warning: token limit exceeded by %s tokens" % -remaining_tokens)
        # Walk the chunks from newest to oldest.
        for i in range(len(self.context_chunks) - 1, -1, -1):
            chunk = self.context_chunks[i]
            # The most recent chunk is always kept.
            if i == len(self.context_chunks) - 1:
                chunks_to_keep.append(i)
                continue
            # Keep-score = weighted importance + recency bonus + cosine relevance.
            chunk_importance = chunk.importance * IMPORTANCE_FACTOR
            # Newer chunks earn a larger bonus; it decays with age in seconds.
            chunk_recency = RECENCY_FACTOR / (1.0 + (time.time() - chunk.timestamp))
            relevance_score = float(
                np.dot(chunk.embedding, query_embedding)
                / (np.linalg.norm(chunk.embedding) * np.linalg.norm(query_embedding))
            )
            total_score = chunk_importance + chunk_recency + relevance_score
            if total_score >= RELEVANCE_THRESHOLD:
                encoded_input = self.gpt2_tokenizer(chunk.content, return_tensors='pt')
                chunk_token_count = int(encoded_input['input_ids'].shape[1])
                if remaining_tokens >= chunk_token_count:
                    chunks_to_keep.append(i)
                    remaining_tokens -= chunk_token_count
        # The source listing is truncated here; returning the kept indices is
        # implied by the early `return []` above.
        return chunks_to_keep
```
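To see the manager in motion, here is a minimal usage sketch. It is not from the original article: the chunk texts, importance values, and query below are invented for illustration, and the snippet assumes the class exactly as reconstructed above.

```python
# Hypothetical usage sketch (not from the article): the sample texts,
# importance values, and query are illustrative only.
manager = ModelContextManager()

manager.add_chunk("MCP separates what the model currently sees from everything the app has stored.", importance=1.5)
manager.add_chunk("Each chunk is embedded with all-mpnet-base-v2 so it can be scored against a query.")
manager.add_chunk("Token counts come from the GPT-2 tokenizer and are tracked against MAX_TOKENS.")
manager.add_chunk("Unrelated aside: remember to water the office plants.", importance=0.2)

# Decide which chunks are worth keeping for the next model call.
keep = manager.optimize_context_window("How does the manager score relevance?")
print("Indices of chunks to keep:", sorted(keep))
```

Because the keep-score adds importance, a recency bonus, and cosine relevance, even a low-importance chunk survives while the token budget has slack; pruning only starts to bite once remaining_tokens is exhausted or a chunk's combined score falls below RELEVANCE_THRESHOLD.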
