* v0.2.0

* Update 0.2.0.json

* Update CHANGELOG.md
Alonso Guevara 2024-07-24 21:02:03 -06:00 committed by GitHub
parent ac6f240e29
commit c8aefb23cb
25 changed files with 127 additions and 90 deletions

.semversioner/0.2.0.json (new file, +94)

@@ -0,0 +1,94 @@
{
"changes": [
{
"description": "Add content-based KNN for selecting prompt tune few shot examples",
"type": "minor"
},
{
"description": "Add dynamic community report rating to the prompt tuning engine",
"type": "minor"
},
{
"description": "Add Minute-based Rate Limiting and fix rpm, tpm settings",
"type": "patch"
},
{
"description": "Add N parameter support",
"type": "patch"
},
{
"description": "Add cli flag to overlay default values onto a provided config.",
"type": "patch"
},
{
"description": "Add exception handling on file load",
"type": "patch"
},
{
"description": "Add language support to prompt tuning",
"type": "patch"
},
{
"description": "Add llm params to local and global search",
"type": "patch"
},
{
"description": "Fix broken prompt tuning link on docs",
"type": "patch"
},
{
"description": "Fix delta none on query calls",
"type": "patch"
},
{
"description": "Fix docsite base url",
"type": "patch"
},
{
"description": "Fix encoding model parameter on prompt tune",
"type": "patch"
},
{
"description": "Fix for --limit exceeding the dataframe length",
"type": "patch"
},
{
"description": "Fix for Ruff 0.5.2",
"type": "patch"
},
{
"description": "Fixed an issue where base OpenAI embeddings can't work with Azure OpenAI LLM",
"type": "patch"
},
{
"description": "Modify defaults for CHUNK_SIZE, CHUNK_OVERLAP and GLEANINGS to reduce time and LLM calls",
"type": "patch"
},
{
"description": "fix community_report doesn't work in settings.yaml",
"type": "patch"
},
{
"description": "fix llm response content is None in query",
"type": "patch"
},
{
"description": "fix the organization parameter is ineffective during queries",
"type": "patch"
},
{
"description": "remove duplicate file read",
"type": "patch"
},
{
"description": "support non-open ai model config to prompt tune",
"type": "patch"
},
{
"description": "use binary io processing for all file io operations",
"type": "patch"
}
],
"created_at": "2024-07-25T02:01:38+00:00",
"version": "0.2.0"
}

@@ -1,4 +0,0 @@
{
"type": "minor",
"description": "Add dynamic community report rating to the prompt tuning engine"
}

@@ -1,4 +0,0 @@
{
"type": "minor",
"description": "Add content-based KNN for selecting prompt tune few shot examples"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix docsite base url"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add cli flag to overlay default values onto a provided config."
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix broken prompt tuning link on docs"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix for --limit exceeding the dataframe lenght"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add Minute-based Rate Limiting and fix rpm, tpm settings"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add N parameter support"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "fix community_report doesn't work in settings.yaml"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add language support to prompt tuning"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Modify defaults for CHUNK_SIZE, CHUNK_OVERLAP and GLEANINGS to reduce time and LLM calls"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fixed an issue where base OpenAI embeddings can't work with Azure OpenAI LLM"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix encoding model parameter on prompt tune"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "support non-open ai model config to prompt tune"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix delta none on query calls"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "fix llm response content is None in query"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add exception handling on file load"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Add llm params to local and global search"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "Fix for Ruff 0.5.2"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "fix the organization parameter is ineffective during queries"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "use binary io processing for all file io operations"
}

@@ -1,4 +0,0 @@
{
"type": "patch",
"description": "remove duplicate file read"
}

CHANGELOG.md (new file, +31)

@@ -0,0 +1,31 @@
# Changelog
Note: version releases in the 0.x.y range may introduce breaking changes.
## 0.2.0
- minor: Add content-based KNN for selecting prompt tune few shot examples
- minor: Add dynamic community report rating to the prompt tuning engine
- patch: Add Minute-based Rate Limiting and fix rpm, tpm settings
- patch: Add N parameter support
- patch: Add cli flag to overlay default values onto a provided config.
- patch: Add exception handling on file load
- patch: Add language support to prompt tuning
- patch: Add llm params to local and global search
- patch: Fix broken prompt tuning link on docs
- patch: Fix delta none on query calls
- patch: Fix docsite base url
- patch: Fix encoding model parameter on prompt tune
- patch: Fix for --limit exceeding the dataframe length
- patch: Fix for Ruff 0.5.2
- patch: Fixed an issue where base OpenAI embeddings can't work with Azure OpenAI LLM
- patch: Modify defaults for CHUNK_SIZE, CHUNK_OVERLAP and GLEANINGS to reduce time and LLM calls
- patch: fix community_report doesn't work in settings.yaml
- patch: fix llm response content is None in query
- patch: fix the organization parameter is ineffective during queries
- patch: remove duplicate file read
- patch: support non-open ai model config to prompt tune
- patch: use binary io processing for all file io operations
## 0.1.0
- minor: Initial Release

pyproject.toml

@@ -1,7 +1,7 @@
[tool.poetry]
name = "graphrag"
# Maintainers: do not change the version here manually, use ./scripts/release.sh
version = "0.1.1"
version = "0.2.0"
description = ""
authors = [
"Alonso Guevara Fernández <alonsog@microsoft.com>",
@@ -114,7 +114,7 @@ _convert_local_search_nb = 'jupyter nbconvert --output-dir=docsite/posts/query/n
_convert_global_search_nb = 'jupyter nbconvert --output-dir=docsite/posts/query/notebooks/ --output="{notebook_name}_nb" --template=docsite/nbdocsite_template --to markdown examples_notebooks/global_search.ipynb'
_semversioner_release = "semversioner release"
_semversioner_changelog = "semversioner changelog > CHANGELOG.md"
_semversioner_update_toml_version = "update-toml --path tool.poetry.version --value \"$(semversioner current-version)\" pyproject.toml"
_semversioner_update_toml_version = "update-toml update --path tool.poetry.version --value \"$(semversioner current-version)\" pyproject.toml"
coverage_report = 'coverage report --omit "**/tests/**" --show-missing'
check_format = 'ruff format . --check --preview'
fix = "ruff --preview check --fix ."