Fix/json dumps ascii (#873)

* Ensure ascii false in json dumps to support non-ASCII chars

* Format

* Semver
Alonso Guevara 2024-08-09 17:05:48 -06:00 committed by GitHub
parent 7376f149d2
commit 073f650ba9
9 changed files with 45 additions and 21 deletions
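
For reference, the stdlib behavior this change targets: by default json.dumps escapes every non-ASCII character to a \uXXXX sequence, while ensure_ascii=False emits the characters verbatim and leaves encoding to the writer. A minimal illustration (the sample data is invented):

import json

data = {"description": "Cañón del Sumidero, 東京"}
print(json.dumps(data))
# {"description": "Ca\u00f1\u00f3n del Sumidero, \u6771\u4eac"}
print(json.dumps(data, ensure_ascii=False))
# {"description": "Cañón del Sumidero, 東京"}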

@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Fix file dumps using json for non ASCII chars"
+}

@@ -44,7 +44,9 @@ class JsonPipelineCache(PipelineCache):
         if value is None:
             return
         data = {"result": value, **(debug_data or {})}
-        await self._storage.set(key, json.dumps(data), encoding=self._encoding)
+        await self._storage.set(
+            key, json.dumps(data, ensure_ascii=False), encoding=self._encoding
+        )
 
     async def has(self, key: str) -> bool:
         """Has method definition."""

@@ -127,7 +127,9 @@ class SummarizeExtractor:
             name="summarize",
             variables={
                 self._entity_name_key: json.dumps(items),
-                self._input_descriptions_key: json.dumps(sorted(descriptions)),
+                self._input_descriptions_key: json.dumps(
+                    sorted(descriptions), ensure_ascii=False
+                ),
             },
             model_parameters={"max_tokens": self._max_summary_length},
         )

@@ -78,7 +78,7 @@ class BlobWorkflowCallbacks(NoopWorkflowCallbacks):
         blob_client = self._blob_service_client.get_blob_client(
             self._container_name, self._blob_name
         )
-        blob_client.append_block(json.dumps(log) + "\n")
+        blob_client.append_block(json.dumps(log, ensure_ascii=False) + "\n")
 
         # update the blob's block count
         self._num_blocks += 1

@@ -34,13 +34,16 @@ class FileWorkflowCallbacks(NoopWorkflowCallbacks):
     ):
         """Handle when an error occurs."""
         self._out_stream.write(
-            json.dumps({
-                "type": "error",
-                "data": message,
-                "stack": stack,
-                "source": str(cause),
-                "details": details,
-            })
+            json.dumps(
+                {
+                    "type": "error",
+                    "data": message,
+                    "stack": stack,
+                    "source": str(cause),
+                    "details": details,
+                },
+                ensure_ascii=False,
+            )
             + "\n"
         )
         message = f"{message} details={details}"
@@ -49,14 +52,21 @@
     def on_warning(self, message: str, details: dict | None = None):
         """Handle when a warning occurs."""
         self._out_stream.write(
-            json.dumps({"type": "warning", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "warning", "data": message, "details": details},
+                ensure_ascii=False,
+            )
+            + "\n"
         )
         _print_warning(message)
 
     def on_log(self, message: str, details: dict | None = None):
         """Handle when a log message is produced."""
         self._out_stream.write(
-            json.dumps({"type": "log", "data": message, "details": details}) + "\n"
+            json.dumps(
+                {"type": "log", "data": message, "details": details}, ensure_ascii=False
+            )
+            + "\n"
         )
         message = f"{message} details={details}"

@@ -234,7 +234,9 @@ async def run_pipeline(
         )
 
     async def dump_stats() -> None:
-        await storage.set("stats.json", json.dumps(asdict(stats), indent=4))
+        await storage.set(
+            "stats.json", json.dumps(asdict(stats), indent=4, ensure_ascii=False)
+        )
 
     async def load_table_from_storage(name: str) -> pd.DataFrame:
         if not await storage.has(name):

@@ -224,7 +224,7 @@ class TextListSplitter(TextSplitter):
         """Append the current chunk to the result."""
         if new_chunk and len(new_chunk) > 0:
             if self._type == TextListSplitterType.JSON:
-                chunk_list.append(json.dumps(new_chunk))
+                chunk_list.append(json.dumps(new_chunk, ensure_ascii=False))
             else:
                 chunk_list.append(self._output_delimiter.join(new_chunk))

@@ -82,7 +82,9 @@ async def _run_extractor(
             rank_explanation=report.get("rating_explanation", ""),
             summary=report.get("summary", ""),
             findings=report.get("findings", []),
-            full_content_json=json.dumps(report, indent=4),
+            full_content_json=json.dumps(report, indent=4, ensure_ascii=False),
         )
     except Exception as e:
         log.exception("Error processing community: %s", community)

@@ -49,9 +49,11 @@ async def snapshot_rows(
         if fmt.format == "json":
             await storage.set(
                 f"{row_name}.{extension}",
-                json.dumps(row[column])
-                if column is not None
-                else json.dumps(row.to_dict()),
+                (
+                    json.dumps(row[column], ensure_ascii=False)
+                    if column is not None
+                    else json.dumps(row.to_dict(), ensure_ascii=False)
+                ),
             )
         elif fmt.format == "text":
             if column is None:
@@ -65,9 +67,11 @@
 def _parse_formats(formats: list[str | dict[str, Any]]) -> list[FormatSpecifier]:
     """Parse the formats into a list of FormatSpecifiers."""
     return [
-        FormatSpecifier(**fmt)
-        if isinstance(fmt, dict)
-        else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        (
+            FormatSpecifier(**fmt)
+            if isinstance(fmt, dict)
+            else FormatSpecifier(format=fmt, extension=_get_format_extension(fmt))
+        )
         for fmt in formats
     ]
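
A hypothetical call to _parse_formats, with the FormatSpecifier fields inferred from this diff (format and extension): plain strings get an extension derived by _get_format_extension, while dicts are unpacked as keyword arguments.

specs = _parse_formats(["json", {"format": "text", "extension": "txt"}])
# [FormatSpecifier(format="json", extension="json"),
#  FormatSpecifier(format="text", extension="txt")]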