never executed always true always false
    1 module PureClaw.Providers.Ollama
    2   ( -- * Provider type
    3     OllamaProvider
    4   , mkOllamaProvider
    5   , mkOllamaProviderWithUrl
    6     -- * Errors
    7   , OllamaError (..)
    8     -- * Request/response encoding (exported for testing)
    9   , encodeRequest
   10   , decodeResponse
   11     -- * Model listing (exported for testing)
   12   , parseModelNames
   13   ) where
   14 
import Control.Exception
import Data.Aeson
import Data.Aeson.Types
import Data.ByteString (ByteString)
import Data.ByteString.Lazy qualified as BL
import Data.List (dropWhileEnd)
import Data.Maybe qualified
import Data.Text (Text)
import Data.Text qualified as T
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Types.Status qualified as Status

import PureClaw.Core.Errors
import PureClaw.Core.Types
import PureClaw.Providers.Class
   29 
-- | Ollama provider for local model inference.
--
-- Construct via 'mkOllamaProvider' or 'mkOllamaProviderWithUrl';
-- the data constructor itself is not exported.
data OllamaProvider = OllamaProvider
  { _ol_manager :: HTTP.Manager  -- ^ Shared HTTP connection manager.
  , _ol_baseUrl :: String        -- ^ Base URL without endpoint path (e.g. "http://localhost:11434"); no trailing slash.
  }
   35 
   36 -- | Create an Ollama provider. Defaults to localhost:11434.
   37 mkOllamaProvider :: HTTP.Manager -> OllamaProvider
   38 mkOllamaProvider mgr = OllamaProvider mgr "http://localhost:11434"
   39 
   40 -- | Create an Ollama provider with a custom base URL.
   41 -- The URL should be the base (e.g. @http://myhost:11434@).
   42 mkOllamaProviderWithUrl :: HTTP.Manager -> String -> OllamaProvider
   43 mkOllamaProviderWithUrl mgr url =
   44   let trimmed = reverse (dropWhile (== '/') (reverse url))
   45   in OllamaProvider mgr trimmed
   46 
-- | Wire the Ollama-specific implementations into the generic
-- 'Provider' interface.
instance Provider OllamaProvider where
  complete = ollamaComplete
  listModels = ollamaListModels
   50 
-- | Errors from the Ollama API.
data OllamaError
  = OllamaAPIError Int ByteString  -- ^ Non-200 HTTP status code and the raw response body.
  | OllamaParseError Text          -- ^ Response decoded as JSON but did not match the expected shape; carries the parser error.
  deriving stock (Show)

-- | Thrown via 'throwIO' from 'ollamaComplete'.
instance Exception OllamaError
   58 
-- | All provider failures surface to users as a generic
-- 'TemporaryError'; internal details (status codes, bodies) are kept
-- out of public-facing messages.
instance ToPublicError OllamaError where
  toPublicError _ = TemporaryError "Provider error"
   61 
   62 ollamaComplete :: OllamaProvider -> CompletionRequest -> IO CompletionResponse
   63 ollamaComplete provider req = do
   64   initReq <- HTTP.parseRequest (_ol_baseUrl provider ++ "/api/chat")
   65   let httpReq = initReq
   66         { HTTP.method = "POST"
   67         , HTTP.requestBody = HTTP.RequestBodyLBS (encodeRequest req)
   68         , HTTP.requestHeaders = [("content-type", "application/json")]
   69         , HTTP.responseTimeout = HTTP.responseTimeoutMicro (5 * 60 * 1000000)  -- 5 minutes
   70         }
   71   resp <- HTTP.httpLbs httpReq (_ol_manager provider)
   72   let status = Status.statusCode (HTTP.responseStatus resp)
   73   if status /= 200
   74     then throwIO (OllamaAPIError status (BL.toStrict (HTTP.responseBody resp)))
   75     else case decodeResponse (HTTP.responseBody resp) of
   76       Left err -> throwIO (OllamaParseError (T.pack err))
   77       Right response -> pure response
   78 
   79 -- | Encode a request for the Ollama /api/chat endpoint.
   80 -- Ollama uses system messages in the messages array and a simpler
   81 -- tool format than OpenAI.
   82 encodeRequest :: CompletionRequest -> BL.ByteString
   83 encodeRequest req = encode $ object $
   84   [ "model"    .= unModelId (_cr_model req)
   85   , "messages" .= encodeMessages req
   86   , "stream"   .= False
   87   ]
   88   ++ ["tools" .= map encodeTool (_cr_tools req) | not (null (_cr_tools req))]
   89 
   90 encodeMessages :: CompletionRequest -> [Value]
   91 encodeMessages req =
   92   maybe [] (\s -> [object ["role" .= ("system" :: Text), "content" .= s]]) (_cr_systemPrompt req)
   93   ++ map encodeMsg (_cr_messages req)
   94 
   95 encodeMsg :: Message -> Value
   96 encodeMsg msg = case _msg_content msg of
   97   [TextBlock t] ->
   98     object ["role" .= roleToText (_msg_role msg), "content" .= t]
   99   blocks ->
  100     -- Ollama supports content as string only; concatenate text blocks
  101     let textParts = concatMap blockText blocks
  102     in object ["role" .= roleToText (_msg_role msg), "content" .= T.intercalate "\n" textParts]
  103 
  104 blockText :: ContentBlock -> [Text]
  105 blockText (TextBlock t) = [t]
  106 blockText (ImageBlock _ _) = ["[image]"]
  107 blockText (ToolUseBlock _ name _) = ["[tool:" <> name <> "]"]
  108 blockText (ToolResultBlock _ parts _) = [t | TRPText t <- parts]
  109 
  110 encodeTool :: ToolDefinition -> Value
  111 encodeTool td = object
  112   [ "type" .= ("function" :: Text)
  113   , "function" .= object
  114       [ "name"        .= _td_name td
  115       , "description" .= _td_description td
  116       , "parameters"  .= _td_inputSchema td
  117       ]
  118   ]
  119 
  120 -- | Decode an Ollama /api/chat response.
  121 decodeResponse :: BL.ByteString -> Either String CompletionResponse
  122 decodeResponse bs = eitherDecode bs >>= parseEither parseResp
  123   where
  124     parseResp :: Value -> Parser CompletionResponse
  125     parseResp = withObject "OllamaResponse" $ \o -> do
  126       msg <- o .: "message"
  127       content <- msg .: "content"
  128       modelText <- o .: "model"
  129       -- Ollama tool calls come as tool_calls array in the message
  130       toolCalls <- msg .:? "tool_calls" .!= ([] :: [Value])
  131       toolBlocks <- mapM parseToolCall toolCalls
  132       let textBlocks = [TextBlock content | not (T.null content)]
  133       pure CompletionResponse
  134         { _crsp_content = textBlocks ++ toolBlocks
  135         , _crsp_model   = ModelId modelText
  136         , _crsp_usage   = Nothing  -- Ollama doesn't report usage in chat endpoint
  137         }
  138 
  139     parseToolCall :: Value -> Parser ContentBlock
  140     parseToolCall = withObject "ToolCall" $ \tc -> do
  141       fn <- tc .: "function"
  142       name <- fn .: "name"
  143       args <- fn .: "arguments"
  144       -- Ollama doesn't return a call ID, so generate a placeholder
  145       pure (ToolUseBlock (ToolCallId ("ollama-" <> name)) name args)
  146 
  147 -- | List available models via Ollama's /api/tags endpoint.
  148 -- Returns an empty list on any error (network, parse, etc.).
  149 ollamaListModels :: OllamaProvider -> IO [ModelId]
  150 ollamaListModels provider = do
  151   result <- try @SomeException $ do
  152     initReq <- HTTP.parseRequest (_ol_baseUrl provider ++ "/api/tags")
  153     let tagsReq = initReq { HTTP.responseTimeout = HTTP.responseTimeoutMicro (30 * 1000000) }  -- 30 seconds
  154     resp <- HTTP.httpLbs tagsReq (_ol_manager provider)
  155     let status = Status.statusCode (HTTP.responseStatus resp)
  156     if status /= 200
  157       then pure []
  158       else case eitherDecode (HTTP.responseBody resp) of
  159         Left _    -> pure []
  160         Right val -> pure (parseModelNames val)
  161   case result of
  162     Left _  -> pure []
  163     Right models -> pure models
  164 
  165 -- | Parse model names from Ollama /api/tags response.
  166 -- Expected format: { "models": [{ "name": "llama3:latest", ... }, ...] }
  167 parseModelNames :: Value -> [ModelId]
  168 parseModelNames = Data.Maybe.fromMaybe [] . parseMaybe parseModels
  169   where
  170     parseModels :: Value -> Parser [ModelId]
  171     parseModels = withObject "OllamaTagsResponse" $ \o -> do
  172       models <- o .: "models"
  173       mapM (withObject "Model" (\m -> ModelId <$> m .: "name")) models