I’m encountering a server error while trying to utilize the DataAPI client in my Next.js RAG app. The error indicates that it fails to load the fetch-h2 client and suggests modifying httpOptions.client to ‘fetch’. Below is the error message I’m receiving:
{
"props": {
"pageProps": {
"statusCode": 500
}
},
"page": "/_error",
"query": {
"__NEXT_PAGE": "/api/messages"
},
"buildId": "development",
"isFallback": false,
"err": {
"name": "Error",
"source": "server",
"message": "Error loading the fetch-h2 client for the DataAPIClient... try setting httpOptions.client to 'fetch'"
}
}
Here’s the API route that appears to be problematic:
import { NextRequest, NextResponse } from 'next/server';
import { getDataStore } from '@/lib/database';
import { AIMessage, HumanMessage } from '@langchain/core/messages';
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts';
import { ChatOpenAI } from '@langchain/openai';
import { Redis } from '@upstash/redis';
import { Ratelimit } from '@upstash/ratelimit';
import { LangChainStream, StreamingTextResponse } from 'ai';
import { UpstashRedisCache } from '@langchain/community/caches/upstash_redis';
import { createStuffDocumentsChain } from 'langchain/chains/combine_documents';
import { createHistoryAwareRetriever } from 'langchain/chains/history_aware_retriever';
import { createRetrievalChain } from 'langchain/chains/retrieval';
import https from 'https';
const rateLimiter = new Ratelimit({
redis: Redis.fromEnv(),
limiter: Ratelimit.fixedWindow(10, '60s'),
});
export async function POST(request: NextRequest) {
try {
const clientIP = request.ip ?? 'unknown';
const { success } = await rateLimiter.limit(clientIP);
if (!success) {
return new Response('Rate limit exceeded', { status: 429 });
}
const requestBody = await request.json();
const userMessages = requestBody.messages;
const previousMessages = userMessages
.slice(0, -1)
.map((msg) =>
msg.role === 'user'
? new HumanMessage(msg.content)
: new AIMessage(msg.content)
);
const latestMessage = userMessages[userMessages.length - 1].content;
const redisCache = new UpstashRedisCache({
client: Redis.fromEnv({
agent: new https.Agent({ keepAlive: true }),
}),
});
const { stream, handlers } = LangChainStream();
const mainChatModel = new ChatOpenAI({
apiKey: process.env.OPENAI_API_KEY,
modelName: 'gpt-3.5-turbo',
streaming: true,
callbacks: [handlers],
cache: redisCache,
});
const queryModel = new ChatOpenAI({
apiKey: process.env.OPENAI_API_KEY,
modelName: 'gpt-3.5-turbo',
cache: redisCache,
});
const vectorRetriever = (await getDataStore()).asRetriever();
const queryTemplate = ChatPromptTemplate.fromMessages([
new MessagesPlaceholder('chat_history'),
['user', '{input}'],
[
'user',
'Based on our conversation, create a search query to find relevant information for the current question.',
],
]);
const contextRetriever = await createHistoryAwareRetriever({
llm: queryModel,
retriever: vectorRetriever,
rephrasePrompt: queryTemplate,
});
const systemPrompt = ChatPromptTemplate.fromMessages([
[
'system',
'You are an AI assistant for BetSmart platform. Answer questions using the provided context.',
],
new MessagesPlaceholder('chat_history'),
['user', '{input}'],
]);
const documentChain = await createStuffDocumentsChain({
llm: mainChatModel,
prompt: systemPrompt,
});
const fullChain = await createRetrievalChain({
combineDocsChain: documentChain,
retriever: contextRetriever,
});
fullChain.invoke({
input: latestMessage,
chat_history: previousMessages,
});
return new StreamingTextResponse(stream);
} catch (err) {
console.error('API Error:', err);
return NextResponse.json({ error: 'Server error occurred' }, { status: 500 });
}
}
Finally, here’s the setup for my database:
import { AstraDB, DataAPIClient } from '@datastax/astra-db-ts';
import { AstraDBVectorStore } from '@langchain/community/vectorstores/astradb';
import { OpenAIEmbeddings } from '@langchain/openai';
const dbEndpoint = process.env.ASTRA_DB_ENDPOINT || '';
const authToken = process.env.ASTRA_DB_APPLICATION_TOKEN || '';
const collectionName = process.env.ASTRA_DB_COLLECTION || '';
if (!authToken || !dbEndpoint || !collectionName) {
throw new Error('Missing required environment variables for Astra DB');
}
export async function getDataStore() {
return AstraDBVectorStore.fromExistingIndex(
new OpenAIEmbeddings({ modelName: 'text-embedding-3-small' }),
{
token: authToken,
endpoint: dbEndpoint,
collection: collectionName,
collectionOptions: {
vector: {
dimension: 1536,
metric: 'cosine',
},
}
}
);
}
const apiClient = new DataAPIClient(authToken);
const database = apiClient.db(dbEndpoint);
export async function getCollection() {
return database.collection(collectionName);
}
What confuses me is that this same code works flawlessly in another project I have. I’m implementing Next.js along with AI libraries for streaming, Upstash for caching, and DataStax AstraDB for vector embeddings. Has anyone faced this fetch-h2 client loading issue before? Any tips on how to resolve it would be greatly appreciated.