Skip to content

Commit c30a4f1

Browse files
committed
feat(ollama): add ollama types and meta
1 parent a3295cd commit c30a4f1

File tree

67 files changed

+4734
-52
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

67 files changed

+4734
-52
lines changed
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const ATHENE_V2_LATEST = {
5+
name: 'athene-v2:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: ['tools'],
10+
},
11+
size: '47gb',
12+
context: 32_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const ATHENE_V2_72b = {
16+
name: 'athene-v2:72b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: ['tools'],
21+
},
22+
size: '47gb',
23+
context: 32_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
export const ATHENE_MODELS = [
27+
ATHENE_V2_LATEST.name,
28+
ATHENE_V2_72b.name,
29+
] as const
30+
31+
// const ATHENE_IMAGE_MODELS = [] as const
32+
33+
// export const ATHENE_EMBEDDING_MODELS = [] as const
34+
35+
// const ATHENE_AUDIO_MODELS = [] as const
36+
37+
// const ATHENE_VIDEO_MODELS = [] as const
38+
39+
// export type AtheneChatModels = (typeof ATHENE_MODELS)[number]
40+
41+
// Manual type map for per-model provider options
42+
export type AtheneChatModelProviderOptionsByName = {
43+
// Models with thinking and structured output support
44+
[ATHENE_V2_LATEST.name]: ChatRequest
45+
[ATHENE_V2_72b.name]: ChatRequest
46+
}
47+
48+
export type AtheneModelInputModalitiesByName = {
49+
// Models with text, image, audio, video (no document)
50+
[ATHENE_V2_LATEST.name]: typeof ATHENE_V2_LATEST.supports.input
51+
[ATHENE_V2_72b.name]: typeof ATHENE_V2_72b.supports.input
52+
}
Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const AYA_LATEST = {
5+
name: 'aya:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: [],
10+
},
11+
size: '4.8gb',
12+
context: 8_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const AYA_8b = {
16+
name: 'aya:8b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: [],
21+
},
22+
size: '4.8gb',
23+
context: 8_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
const AYA_35b = {
27+
name: 'aya:35b',
28+
supports: {
29+
input: ['text'],
30+
output: ['text'],
31+
capabilities: [],
32+
},
33+
size: '20gb',
34+
context: 8_000,
35+
} as const satisfies DefaultOllamaModelMeta<any>
36+
37+
export const AYA_MODELS = [AYA_LATEST.name, AYA_8b.name, AYA_35b.name] as const
38+
39+
// const AYA_IMAGE_MODELS = [] as const
40+
41+
// export const AYA_EMBEDDING_MODELS = [] as const
42+
43+
// const AYA_AUDIO_MODELS = [] as const
44+
45+
// const AYA_VIDEO_MODELS = [] as const
46+
47+
// export type AyaChatModels = (typeof AYA_MODELS)[number]
48+
49+
// Manual type map for per-model provider options
50+
export type AyaChatModelProviderOptionsByName = {
51+
// Models with thinking and structured output support
52+
[AYA_LATEST.name]: ChatRequest
53+
[AYA_8b.name]: ChatRequest
54+
[AYA_35b.name]: ChatRequest
55+
}
56+
57+
export type AyaModelInputModalitiesByName = {
58+
// Models with text, image, audio, video (no document)
59+
[AYA_LATEST.name]: typeof AYA_LATEST.supports.input
60+
[AYA_8b.name]: typeof AYA_8b.supports.input
61+
[AYA_35b.name]: typeof AYA_35b.supports.input
62+
}
Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const CODEGEMMA_LATEST = {
5+
name: 'codegemma:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: [],
10+
},
11+
size: '5gb',
12+
context: 8_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const CODEGEMMA_8b = {
16+
name: 'codegemma:2b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: [],
21+
},
22+
size: '1.65gb',
23+
context: 8_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
const CODEGEMMA_35b = {
27+
name: 'codegemma:7b',
28+
supports: {
29+
input: ['text'],
30+
output: ['text'],
31+
capabilities: [],
32+
},
33+
size: '5gb',
34+
context: 8_000,
35+
} as const satisfies DefaultOllamaModelMeta<any>
36+
37+
export const CODEGEMMA_MODELS = [
38+
CODEGEMMA_LATEST.name,
39+
CODEGEMMA_8b.name,
40+
CODEGEMMA_35b.name,
41+
] as const
42+
43+
// const CODEGEMMA_IMAGE_MODELS = [] as const
44+
45+
// export const CODEGEMMA_EMBEDDING_MODELS = [] as const
46+
47+
// const CODEGEMMA_AUDIO_MODELS = [] as const
48+
49+
// const CODEGEMMA_VIDEO_MODELS = [] as const
50+
51+
// export type CodegemmaChatModels = (typeof CODEGEMMA_MODELS)[number]
52+
53+
// Manual type map for per-model provider options
54+
export type CodegemmaChatModelProviderOptionsByName = {
55+
// Models with thinking and structured output support
56+
[CODEGEMMA_LATEST.name]: ChatRequest
57+
[CODEGEMMA_8b.name]: ChatRequest
58+
[CODEGEMMA_35b.name]: ChatRequest
59+
}
60+
61+
export type CodegemmaModelInputModalitiesByName = {
62+
// Models with text, image, audio, video (no document)
63+
[CODEGEMMA_LATEST.name]: typeof CODEGEMMA_LATEST.supports.input
64+
[CODEGEMMA_8b.name]: typeof CODEGEMMA_8b.supports.input
65+
[CODEGEMMA_35b.name]: typeof CODEGEMMA_35b.supports.input
66+
}
Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const CODELLAMA_LATEST = {
5+
name: 'codellama:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: [],
10+
},
11+
size: '3.8gb',
12+
context: 16_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const CODELLAMA_7b = {
16+
name: 'codellama:7b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: [],
21+
},
22+
size: '3.8gb',
23+
context: 16_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
const CODELLAMA_13b = {
27+
name: 'codellama:13b',
28+
supports: {
29+
input: ['text'],
30+
output: ['text'],
31+
capabilities: [],
32+
},
33+
size: '7.4gb',
34+
context: 16_000,
35+
} as const satisfies DefaultOllamaModelMeta<any>
36+
37+
const CODELLAMA_34b = {
38+
name: 'codellama:34b',
39+
supports: {
40+
input: ['text'],
41+
output: ['text'],
42+
capabilities: [],
43+
},
44+
size: '19gb',
45+
context: 16_000,
46+
} as const satisfies DefaultOllamaModelMeta<any>
47+
48+
const CODELLAMA_70b = {
49+
name: 'codellama:70b',
50+
supports: {
51+
input: ['text'],
52+
output: ['text'],
53+
capabilities: [],
54+
},
55+
size: '39gb',
56+
context: 2_000,
57+
} as const satisfies DefaultOllamaModelMeta<any>
58+
59+
export const CODELLAMA_MODELS = [
60+
CODELLAMA_LATEST.name,
61+
CODELLAMA_7b.name,
62+
CODELLAMA_13b.name,
63+
CODELLAMA_34b.name,
64+
CODELLAMA_70b.name,
65+
] as const
66+
67+
// const CODELLAMA_IMAGE_MODELS = [] as const
68+
69+
// export const CODELLAMA_EMBEDDING_MODELS = [] as const
70+
71+
// const CODELLAMA_AUDIO_MODELS = [] as const
72+
73+
// const CODELLAMA_VIDEO_MODELS = [] as const
74+
75+
// export type CodellamaChatModels = (typeof CODELLAMA_MODELS)[number]
76+
77+
// Manual type map for per-model provider options
78+
export type CodellamaChatModelProviderOptionsByName = {
79+
// Models with thinking and structured output support
80+
[CODELLAMA_LATEST.name]: ChatRequest
81+
[CODELLAMA_7b.name]: ChatRequest
82+
[CODELLAMA_13b.name]: ChatRequest
83+
[CODELLAMA_34b.name]: ChatRequest
84+
[CODELLAMA_70b.name]: ChatRequest
85+
}
86+
87+
export type CodellamaModelInputModalitiesByName = {
88+
// Models with text, image, audio, video (no document)
89+
[CODELLAMA_LATEST.name]: typeof CODELLAMA_LATEST.supports.input
90+
[CODELLAMA_7b.name]: typeof CODELLAMA_7b.supports.input
91+
[CODELLAMA_13b.name]: typeof CODELLAMA_13b.supports.input
92+
[CODELLAMA_34b.name]: typeof CODELLAMA_34b.supports.input
93+
[CODELLAMA_70b.name]: typeof CODELLAMA_70b.supports.input
94+
}
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const COMMAND_R_PLUS_LATEST = {
5+
name: 'command-r-plus:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: ['tools'],
10+
},
11+
size: '59gb',
12+
context: 128_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const COMMAND_R_PLUS_104b = {
16+
name: 'command-r-plus:104b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: ['tools'],
21+
},
22+
size: '59gb',
23+
context: 128_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
export const COMMAND_R_PLUS_MODELS = [
27+
COMMAND_R_PLUS_LATEST.name,
28+
COMMAND_R_PLUS_104b.name,
29+
] as const
30+
31+
// const COMMAND_R_PLUS_IMAGE_MODELS = [] as const
32+
33+
// export const COMMAND_R_PLUS_EMBEDDING_MODELS = [] as const
34+
35+
// const COMMAND_R_PLUS_AUDIO_MODELS = [] as const
36+
37+
// const COMMAND_R_PLUS_VIDEO_MODELS = [] as const
38+
39+
// export type CommandRChatModels = (typeof COMMAND_R_PLUS_MODELS)[number]
40+
41+
// Manual type map for per-model provider options
42+
export type CommandRPlusChatModelProviderOptionsByName = {
43+
// Models with thinking and structured output support
44+
[COMMAND_R_PLUS_LATEST.name]: ChatRequest
45+
[COMMAND_R_PLUS_104b.name]: ChatRequest
46+
}
47+
48+
export type CommandRPlusModelInputModalitiesByName = {
49+
// Models with text, image, audio, video (no document)
50+
[COMMAND_R_PLUS_LATEST.name]: typeof COMMAND_R_PLUS_LATEST.supports.input
51+
[COMMAND_R_PLUS_104b.name]: typeof COMMAND_R_PLUS_104b.supports.input
52+
}
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
import type { ChatRequest } from 'ollama'
2+
import type { DefaultOllamaModelMeta } from './models-meta'
3+
4+
const COMMAND_R_LATEST = {
5+
name: 'command-r:latest',
6+
supports: {
7+
input: ['text'],
8+
output: ['text'],
9+
capabilities: ['tools'],
10+
},
11+
size: '19gb',
12+
context: 128_000,
13+
} as const satisfies DefaultOllamaModelMeta<any>
14+
15+
const COMMAND_R_35b = {
16+
name: 'command-r:35b',
17+
supports: {
18+
input: ['text'],
19+
output: ['text'],
20+
capabilities: ['tools'],
21+
},
22+
size: '19gb',
23+
context: 128_000,
24+
} as const satisfies DefaultOllamaModelMeta<any>
25+
26+
export const COMMAND_R_MODELS = [
27+
COMMAND_R_LATEST.name,
28+
COMMAND_R_35b.name,
29+
] as const
30+
31+
// const COMMAND_R_IMAGE_MODELS = [] as const
32+
33+
// export const COMMAND_R_EMBEDDING_MODELS = [] as const
34+
35+
// const COMMAND_R_AUDIO_MODELS = [] as const
36+
37+
// const COMMAND_R_VIDEO_MODELS = [] as const
38+
39+
// export type CommandRChatModels = (typeof COMMAND_R_MODELS)[number]
40+
41+
// Manual type map for per-model provider options
42+
export type CommandRChatModelProviderOptionsByName = {
43+
// Models with thinking and structured output support
44+
[COMMAND_R_LATEST.name]: ChatRequest
45+
[COMMAND_R_35b.name]: ChatRequest
46+
}
47+
48+
export type CommandRModelInputModalitiesByName = {
49+
// Models with text, image, audio, video (no document)
50+
[COMMAND_R_LATEST.name]: typeof COMMAND_R_LATEST.supports.input
51+
[COMMAND_R_35b.name]: typeof COMMAND_R_35b.supports.input
52+
}

0 commit comments

Comments
 (0)