mirror of https://github.com/ollama/ollama
types: ConfigV2 and RootFS (#13504)
Refactored the ConfigV2 and RootFS types from server/images.go into a new types/model/config.go file under the model package, and updated all references to use model.ConfigV2 and model.RootFS. This allows these types to be used in other projects without having to compile the C code in the llama package.
This commit is contained in:
parent
2dd029de12
commit
45c4739374
|
|
@ -42,10 +42,10 @@ var (
|
|||
)
|
||||
|
||||
func (s *Server) CreateHandler(c *gin.Context) {
|
||||
config := &ConfigV2{
|
||||
config := &model.ConfigV2{
|
||||
OS: "linux",
|
||||
Architecture: "amd64",
|
||||
RootFS: RootFS{
|
||||
RootFS: model.RootFS{
|
||||
Type: "layers",
|
||||
},
|
||||
}
|
||||
|
|
@ -126,7 +126,7 @@ func (s *Server) CreateHandler(c *gin.Context) {
|
|||
configPath, pErr := GetBlobsPath(manifest.Config.Digest)
|
||||
if pErr == nil {
|
||||
if cfgFile, fErr := os.Open(configPath); fErr == nil {
|
||||
var baseConfig ConfigV2
|
||||
var baseConfig model.ConfigV2
|
||||
if decErr := json.NewDecoder(cfgFile).Decode(&baseConfig); decErr == nil {
|
||||
if config.Renderer == "" {
|
||||
config.Renderer = baseConfig.Renderer
|
||||
|
|
@ -459,7 +459,7 @@ func kvFromLayers(baseLayers []*layerGGML) (ggml.KV, error) {
|
|||
return ggml.KV{}, fmt.Errorf("no base model was found")
|
||||
}
|
||||
|
||||
func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML, config *ConfigV2, fn func(resp api.ProgressResponse)) (err error) {
|
||||
func createModel(r api.CreateRequest, name model.Name, baseLayers []*layerGGML, config *model.ConfigV2, fn func(resp api.ProgressResponse)) (err error) {
|
||||
var layers []Layer
|
||||
for _, layer := range baseLayers {
|
||||
if layer.GGML != nil {
|
||||
|
|
@ -789,7 +789,7 @@ func setMessages(layers []Layer, m []api.Message) ([]Layer, error) {
|
|||
return layers, nil
|
||||
}
|
||||
|
||||
func createConfigLayer(layers []Layer, config ConfigV2) (*Layer, error) {
|
||||
func createConfigLayer(layers []Layer, config model.ConfigV2) (*Layer, error) {
|
||||
digests := make([]string, len(layers))
|
||||
for i, layer := range layers {
|
||||
digests[i] = layer.Digest
|
||||
|
|
|
|||
|
|
@ -54,7 +54,7 @@ type registryOptions struct {
|
|||
|
||||
type Model struct {
|
||||
Name string `json:"name"`
|
||||
Config ConfigV2
|
||||
Config model.ConfigV2
|
||||
ShortName string
|
||||
ModelPath string
|
||||
ParentModel string
|
||||
|
|
@ -266,35 +266,6 @@ func (m *Model) String() string {
|
|||
return modelfile.String()
|
||||
}
|
||||
|
||||
type ConfigV2 struct {
|
||||
ModelFormat string `json:"model_format"`
|
||||
ModelFamily string `json:"model_family"`
|
||||
ModelFamilies []string `json:"model_families"`
|
||||
ModelType string `json:"model_type"` // shown as Parameter Size
|
||||
FileType string `json:"file_type"` // shown as Quantization Level
|
||||
Renderer string `json:"renderer,omitempty"`
|
||||
Parser string `json:"parser,omitempty"`
|
||||
|
||||
RemoteHost string `json:"remote_host,omitempty"`
|
||||
RemoteModel string `json:"remote_model,omitempty"`
|
||||
|
||||
// used for remotes
|
||||
Capabilities []string `json:"capabilities,omitempty"`
|
||||
ContextLen int `json:"context_length,omitempty"`
|
||||
EmbedLen int `json:"embedding_length,omitempty"`
|
||||
BaseName string `json:"base_name,omitempty"`
|
||||
|
||||
// required by spec
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
RootFS RootFS `json:"rootfs"`
|
||||
}
|
||||
|
||||
type RootFS struct {
|
||||
Type string `json:"type"`
|
||||
DiffIDs []string `json:"diff_ids"`
|
||||
}
|
||||
|
||||
func GetManifest(mp ModelPath) (*Manifest, string, error) {
|
||||
fp, err := mp.GetManifestPath()
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -1223,7 +1223,7 @@ func (s *Server) ListHandler(c *gin.Context) {
|
|||
|
||||
models := []api.ListModelResponse{}
|
||||
for n, m := range ms {
|
||||
var cf ConfigV2
|
||||
var cf model.ConfigV2
|
||||
|
||||
if m.Config.Digest != "" {
|
||||
f, err := m.Config.Open()
|
||||
|
|
|
|||
|
|
@ -241,7 +241,7 @@ func TestCreateFromModelInheritsRendererParser(t *testing.T) {
|
|||
}
|
||||
defer cfgFile.Close()
|
||||
|
||||
var cfg ConfigV2
|
||||
var cfg model.ConfigV2
|
||||
if err := json.NewDecoder(cfgFile).Decode(&cfg); err != nil {
|
||||
t.Fatalf("decode config: %v", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -89,7 +89,7 @@ func TestDeleteDuplicateLayers(t *testing.T) {
|
|||
n := model.ParseName("test")
|
||||
|
||||
var b bytes.Buffer
|
||||
if err := json.NewEncoder(&b).Encode(&ConfigV2{}); err != nil {
|
||||
if err := json.NewEncoder(&b).Encode(&model.ConfigV2{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -126,10 +126,10 @@ func TestRoutes(t *testing.T) {
|
|||
t.Fatalf("failed to create model: %v", err)
|
||||
}
|
||||
|
||||
config := &ConfigV2{
|
||||
config := &model.ConfigV2{
|
||||
OS: "linux",
|
||||
Architecture: "amd64",
|
||||
RootFS: RootFS{
|
||||
RootFS: model.RootFS{
|
||||
Type: "layers",
|
||||
},
|
||||
}
|
||||
|
|
@ -775,7 +775,7 @@ func TestFilterThinkTags(t *testing.T) {
|
|||
{Role: "user", Content: "What is the answer?"},
|
||||
},
|
||||
model: &Model{
|
||||
Config: ConfigV2{
|
||||
Config: model.ConfigV2{
|
||||
ModelFamily: "qwen3",
|
||||
},
|
||||
},
|
||||
|
|
@ -793,7 +793,7 @@ func TestFilterThinkTags(t *testing.T) {
|
|||
{Role: "user", Content: "What is the answer?"},
|
||||
},
|
||||
model: &Model{
|
||||
Config: ConfigV2{
|
||||
Config: model.ConfigV2{
|
||||
ModelFamily: "qwen3",
|
||||
},
|
||||
},
|
||||
|
|
@ -815,7 +815,7 @@ func TestFilterThinkTags(t *testing.T) {
|
|||
{Role: "assistant", Content: "<think>thinking yet again</think>hjk"},
|
||||
},
|
||||
model: &Model{
|
||||
Config: ConfigV2{
|
||||
Config: model.ConfigV2{
|
||||
ModelFamily: "qwen3",
|
||||
},
|
||||
},
|
||||
|
|
@ -833,7 +833,7 @@ func TestFilterThinkTags(t *testing.T) {
|
|||
{Role: "user", Content: "What is the answer?"},
|
||||
},
|
||||
model: &Model{
|
||||
Config: ConfigV2{
|
||||
Config: model.ConfigV2{
|
||||
ModelFamily: "llama3",
|
||||
},
|
||||
},
|
||||
|
|
@ -853,7 +853,7 @@ func TestFilterThinkTags(t *testing.T) {
|
|||
model: &Model{
|
||||
Name: "registry.ollama.ai/library/deepseek-r1:latest",
|
||||
ShortName: "deepseek-r1:7b",
|
||||
Config: ConfigV2{},
|
||||
Config: model.ConfigV2{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,32 @@
|
|||
package model
|
||||
|
||||
// ConfigV2 represents the configuration metadata for a model, following the
// OCI image-config layout: an architecture/OS pair plus a root filesystem
// description, extended with Ollama-specific model metadata.
type ConfigV2 struct {
	ModelFormat   string   `json:"model_format"`
	ModelFamily   string   `json:"model_family"`
	ModelFamilies []string `json:"model_families"`
	ModelType     string   `json:"model_type"` // shown as Parameter Size
	FileType      string   `json:"file_type"`  // shown as Quantization Level
	Renderer      string   `json:"renderer,omitempty"`
	Parser        string   `json:"parser,omitempty"`

	RemoteHost  string `json:"remote_host,omitempty"`
	RemoteModel string `json:"remote_model,omitempty"`

	// used for remotes
	Capabilities []string `json:"capabilities,omitempty"`
	ContextLen   int      `json:"context_length,omitempty"`
	EmbedLen     int      `json:"embedding_length,omitempty"`
	BaseName     string   `json:"base_name,omitempty"`

	// required by spec
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
	RootFS       RootFS `json:"rootfs"`
}

// RootFS represents the root filesystem configuration for a model: the layer
// type plus the digests of the layers that make up the model.
type RootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}
|
||||
Loading…
Reference in New Issue