Skip to main content

Class: HuggingFaceLLM

Unified language model interface

Extends

BaseLLM
Constructors

new HuggingFaceLLM()

new HuggingFaceLLM(init?): HuggingFaceLLM

Parameters

init?: HFLLMConfig

Returns

HuggingFaceLLM

Overrides

BaseLLM.constructor

Defined in

packages/llamaindex/src/llm/huggingface.ts:207

Properties

contextWindow

contextWindow: number

Defined in

packages/llamaindex/src/llm/huggingface.ts:202


maxTokens?

optional maxTokens: number

Defined in

packages/llamaindex/src/llm/huggingface.ts:201


modelName

modelName: string

Defined in

packages/llamaindex/src/llm/huggingface.ts:197


temperature

temperature: number

Defined in

packages/llamaindex/src/llm/huggingface.ts:199


tokenizerName

tokenizerName: string

Defined in

packages/llamaindex/src/llm/huggingface.ts:198


topP

topP: number

Defined in

packages/llamaindex/src/llm/huggingface.ts:200

Accessors

metadata

get metadata(): LLMMetadata

Returns

LLMMetadata

Overrides

BaseLLM.metadata

Defined in

packages/llamaindex/src/llm/huggingface.ts:217

Methods

chat()

chat(params)

chat(params): Promise<AsyncIterable<ChatResponseChunk, any, any>>

Get a streaming chat response from the LLM, delivered as an async iterable of response chunks

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

Promise<AsyncIterable<ChatResponseChunk, any, any>>

Overrides

BaseLLM.chat

Defined in

packages/llamaindex/src/llm/huggingface.ts:260

chat(params)

chat(params): Promise<ChatResponse<object>>

Get a complete (non-streaming) chat response from the LLM

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Overrides

BaseLLM.chat

Defined in

packages/llamaindex/src/llm/huggingface.ts:263


complete()

complete(params)

complete(params): Promise<AsyncIterable<CompletionResponse, any, any>>

Get a streaming prompt completion from the LLM, delivered as an async iterable of completion responses

Parameters

params: LLMCompletionParamsStreaming

Returns

Promise<AsyncIterable<CompletionResponse, any, any>>

Inherited from

BaseLLM.complete

Defined in

packages/core/llms/dist/llms/index.d.ts:168

complete(params)

complete(params): Promise<CompletionResponse>

Get a complete (non-streaming) prompt completion from the LLM

Parameters

params: LLMCompletionParamsNonStreaming

Returns

Promise<CompletionResponse>

Inherited from

BaseLLM.complete

Defined in

packages/core/llms/dist/llms/index.d.ts:169


getModel()

getModel(): Promise<PreTrainedModel>

Returns

Promise<PreTrainedModel>

Defined in

packages/llamaindex/src/llm/huggingface.ts:244


getTokenizer()

getTokenizer(): Promise<PreTrainedTokenizer>

Returns

Promise<PreTrainedTokenizer>

Defined in

packages/llamaindex/src/llm/huggingface.ts:228


nonStreamChat()

protected nonStreamChat(params): Promise<ChatResponse<object>>

Parameters

params: LLMChatParamsNonStreaming<object, object>

Returns

Promise<ChatResponse<object>>

Defined in

packages/llamaindex/src/llm/huggingface.ts:272


streamChat()

protected streamChat(params): AsyncIterable<ChatResponseChunk, any, any>

Parameters

params: LLMChatParamsStreaming<object, object>

Returns

AsyncIterable<ChatResponseChunk, any, any>

Defined in

packages/llamaindex/src/llm/huggingface.ts:304