# Quick Start

Install the SDK for your language.

Python (terminal):
pip install omniedge

Node.js (terminal):
npm install omniedge
import os

from omniedge import OmniEdge

# Initialize with API key.
# Prefer the environment over hardcoding (best practice #1); the literal
# placeholder is kept only as a fallback for copy-paste experimentation.
client = OmniEdge(api_key=os.environ.get("OMNIEDGE_API_KEY", "sk-your-api-key-here"))
import { OmniEdge } from 'omniedge';

// Initialize with API key.
// Prefer the environment over hardcoding (best practice #1); the literal
// placeholder is kept only as a fallback for copy-paste experimentation.
const client = new OmniEdge({
  apiKey: process.env.OMNIEDGE_API_KEY || 'sk-your-api-key-here'
});
from omniedge import OmniEdge

# Basic (non-streaming) chat completion.
# The context manager closes the client's underlying connection on exit.
with OmniEdge(api_key="sk-your-api-key-here") as client:
    # Make chat request
    response = client.chat.create(
        model="openai/gpt-4o",
        messages=[
            {"role": "user", "content": "Hello, world!"}
        ]
    )
    # Print response
    print(response.choices[0].message.content)
import { OmniEdge } from 'omniedge';

const client = new OmniEdge({
  apiKey: 'sk-your-api-key-here'
});

// Build the request once, then send it and print the assistant's reply.
const request = {
  model: "openai/gpt-4o",
  messages: [
    { role: "user", content: "Hello, world!" }
  ]
};

async function run() {
  const response = await client.chat.create(request);
  console.log(response.choices[0].message.content);
}

run();
from omniedge import OmniEdge

# Streaming chat completion: tokens are printed as they arrive rather
# than waiting for the full response.
with OmniEdge(api_key="sk-your-api-key-here") as client:
    # Make streaming request
    stream = client.chat.create(
        model="openai/gpt-4o",
        messages=[
            {"role": "user", "content": "Tell me a story"}
        ],
        stream=True
    )
    # Process response chunk by chunk; delta.content may be None for
    # control chunks, so guard before printing.
    for chunk in stream:
        if chunk.choices:
            content = chunk.choices[0].delta.content
            if content:
                print(content, end='', flush=True)
import { OmniEdge } from 'omniedge';

const client = new OmniEdge({
  apiKey: 'sk-your-api-key-here'
});

// Streaming chat completion: write tokens to stdout as they arrive.
async function main() {
  // Make streaming request
  const stream = await client.chat.create({
    model: "openai/gpt-4o",
    messages: [
      { role: "user", content: "Tell me a story" }
    ],
    stream: true
  });

  // Process response chunk by chunk.
  // NOTE: an empty `choices` array is truthy in JS, so the original
  // `if (chunk.choices)` guard still allowed `chunk.choices[0].delta`
  // to throw; optional chaining handles empty/absent choices safely.
  for await (const chunk of stream) {
    const content = chunk.choices?.[0]?.delta?.content;
    if (content) {
      process.stdout.write(content);
    }
  }
}

main();
from omniedge import OmniEdge
from omniedge.exceptions import APIError, AuthenticationError

# Error handling: catch the most specific exception first, falling
# through to the generic APIError and finally any unexpected failure.
try:
    with OmniEdge(api_key="sk-your-api-key-here") as client:
        response = client.chat.create(
            model="openai/gpt-4o",
            messages=[{"role": "user", "content": "Hello"}]
        )
        print(response.choices[0].message.content)
except AuthenticationError:
    print("Authentication failed, please check API key")
except APIError as e:
    print(f"API Error: {e.message}")
except Exception as e:
    print(f"Unknown error: {e}")
import { OmniEdge } from 'omniedge';

const client = new OmniEdge({
  apiKey: 'sk-your-api-key-here'
});

// Issue one chat request, classifying failures by HTTP status code:
// 401 -> auth problem, 4xx -> client error, 5xx -> server error,
// anything else -> unknown.
async function run() {
  try {
    const response = await client.chat.create({
      model: "openai/gpt-4o",
      messages: [{ role: "user", content: "Hello" }]
    });
    console.log(response.choices[0].message.content);
  } catch (error) {
    const status = error.status;
    if (status === 401) {
      console.log("Authentication failed, please check API key");
    } else if (status >= 400 && status < 500) {
      console.log(`Client error: ${error.message}`);
    } else if (status >= 500) {
      console.log(`Server error: ${error.message}`);
    } else {
      console.log(`Unknown error: ${error.message}`);
    }
  }
}

run();
Best practices:

  1. Store API keys in environment variables rather than hardcoding them
  2. Enable streaming responses for long-form output
  3. Implement retry with backoff for transient errors
  4. Throttle request frequency to stay within rate limits
  5. Use async methods to improve throughput