cooders

Azure GPT-4: no wait / Azure GPT-4 32k: no wait / Azure GPT-4 Turbo: no wait

Server Greeting

for cooders

pass: hatchling
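
The greeting only hands out the access password; a client still has to point at the Azure endpoint listed under Service Info below. The following is a minimal sketch, not an official client: it assumes the proxy's Azure endpoint speaks the standard OpenAI chat-completions API and that the proxy_key gatekeeper accepts the greeting password as an ordinary bearer token, and the model name is a guess rather than something this page confirms.

# Sketch only. Assumptions (not confirmed by this page): the endpoint is
# OpenAI-compatible and the greeting password works as a bearer token.
from openai import OpenAI

client = OpenAI(
    base_url="https://cooders.veryscrappy.moe/proxy/azure/openai",  # endpoint from Service Info
    api_key="hatchling",  # pass from the server greeting
)

resp = client.chat.completions.create(
    model="gpt-4",          # assumed model name; the proxy maps requests to its Azure deployments
    max_tokens=400,         # stays well under maxOutputTokensOpenAI (4096)
    messages=[{"role": "user", "content": "Hello from a cooders client."}],
)
print(resp.choices[0].message.content)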
Service Info

{
  "uptime": 1823651,
  "endpoints": {
    "azure": "https://cooders.veryscrappy.moe/proxy/azure/openai"
  },
  "proompts": 6096,
  "tookens": "20.96m",
  "proomptersNow": 0,
  "azureKeys": 6,
  "azure-gpt4-turbo": {
    "usage": "7.41m tokens",
    "activeKeys": 1,
    "revokedKeys": 1,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "azure-gpt4": {
    "usage": "9.22m tokens",
    "activeKeys": 1,
    "revokedKeys": 1,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "azure-gpt4-32k": {
    "usage": "4.34m tokens",
    "activeKeys": 0,
    "revokedKeys": 2,
    "proomptersInQueue": 0,
    "estimatedQueueTime": "no wait"
  },
  "config": {
    "gatekeeper": "proxy_key",
    "maxIpsAutoBan": "true",
    "textModelRateLimit": "4",
    "imageModelRateLimit": "4",
    "maxContextTokensOpenAI": "128000",
    "maxContextTokensAnthropic": "0",
    "maxOutputTokensOpenAI": "4096",
    "maxOutputTokensAnthropic": "400",
    "allowAwsLogging": "false",
    "promptLogging": "false",
    "tokenQuota": {
      "turbo": "0",
      "gpt4": "0",
      "gpt4-32k": "0",
      "gpt4-turbo": "0",
      "dall-e": "0",
      "claude": "0",
      "claude-opus": "0",
      "gemini-pro": "0",
      "mistral-tiny": "0",
      "mistral-small": "0",
      "mistral-medium": "0",
      "mistral-large": "0",
      "aws-claude": "0",
      "aws-claude-opus": "0",
      "azure-turbo": "0",
      "azure-gpt4": "0",
      "azure-gpt4-32k": "0",
      "azure-gpt4-turbo": "0",
      "azure-dall-e": "0"
    },
    "allowOpenAIToolUsage": "true",
    "allowImagePrompts": "false"
  },
  "build": "[ci] b1062dc (main@khanon/oai-reverse-proxy)"
}
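
Since per-model availability (activeKeys, proomptersInQueue, estimatedQueueTime) and the request limits in config change over time, it can be worth checking this info before sending prompts. Below is a small sketch that reads those fields; it assumes the same Service Info JSON is served at the proxy's base URL when the request asks for application/json, which this page does not confirm.

# Sketch only. Assumption (not confirmed by this page): the Service Info JSON
# above is also served at the proxy's base URL for JSON requests.
import json
import urllib.request

req = urllib.request.Request(
    "https://cooders.veryscrappy.moe/",
    headers={"Accept": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    info = json.load(resp)

# Per-model queue status, using the keys shown in the Service Info dump.
for model in ("azure-gpt4", "azure-gpt4-32k", "azure-gpt4-turbo"):
    stats = info.get(model, {})
    print(
        model,
        "keys:", stats.get("activeKeys"),
        "queue:", stats.get("proomptersInQueue"),
        "wait:", stats.get("estimatedQueueTime"),
    )

# Advertised request limits from the config block.
cfg = info.get("config", {})
print("max context:", cfg.get("maxContextTokensOpenAI"),
      "max output:", cfg.get("maxOutputTokensOpenAI"))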