Mirror of https://github.com/Unstructured-IO/unstructured.git, synced 2025-10-31 01:54:25 +00:00
Commit 5eb1466acc
* Apply import sorting: ruff . --select I --fix
* Remove unnecessary open mode parameter: ruff . --select UP015 --fix
* Use f-string formatting rather than .format
* Remove extraneous parentheses; also use "" instead of str()
* Resolve missing trailing commas: ruff . --select COM --fix
* Rewrite list() and dict() calls using literals: ruff . --select C4 --fix
* Add () to pytest.fixture, use tuples for parametrize, etc.: ruff . --select PT --fix
* Simplify code (merge conditionals, context managers): ruff . --select SIM --fix
* Import without unnecessary alias: ruff . --select PLR0402 --fix
* Apply formatting via black
* Rewrite ValueError somewhat (slightly unrelated to the rest of the PR)
* Apply formatting to tests via black
* Update expected exception message to match 0d81564
* Satisfy E501 line too long in test
* Update changelog & version
* Add ruff to make tidy and test deps
* Run 'make tidy'
* Update changelog & version
* Update changelog & version
* Add ruff to 'check' target. Doing so required also fixing some non-auto-fixable issues; two of them were fixed with a noqa: SIM115, but the one in __init__ especially may need some attention. That said, that refactor is out of scope of this PR.
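The commit message above names several ruff rule families. As a rough illustration of what those autofixes do, here is a hypothetical before/after sketch; none of these snippets come from the diff itself.

# Illustrative sketches of the ruff rewrites named above; the snippets are
# hypothetical and are not taken from this diff.
from pathlib import Path

# PLR0402: use a from-import rather than aliasing a submodule.
# Before: import os.path as path
from os import path

import pytest

Path("notes.txt").write_text("hello\n")
assert path.exists("notes.txt")

# UP015: drop the redundant default mode argument; SIM115: prefer a
# context manager over a bare open()/close() pair.
# Before: f = open("notes.txt", "r"); contents = f.read(); f.close()
with open("notes.txt") as handle:
    contents = handle.read()

# C4: rewrite list() and dict() calls as literals.
# Before: items = list(); mapping = dict(a=1)
items = []
mapping = {"a": 1}

# Prefer f-strings over str.format.
# Before: message = "read {} bytes".format(len(contents))
message = f"read {len(contents)} bytes"

# PT rules: pytest.fixture gets explicit parentheses, and parametrize
# argument names are grouped in a tuple.
# Before: @pytest.fixture and @pytest.mark.parametrize("text,expected", ...)
@pytest.fixture()
def sample_text():
    return "Ask me a question."

def test_sample_text_word_count(sample_text):
    assert len(sample_text.split(" ")) == 4

@pytest.mark.parametrize(("text", "expected"), [("a b", ["a", "b"])])
def test_word_split(text, expected):
    assert text.split(" ") == expected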
22 lines · 566 B · Python
from typing import List, Tuple


def mock_sent_tokenize(text: str) -> List[str]:
    """Naive sentence splitter: split on periods, dropping the empty tail."""
    sentences = text.split(".")
    return sentences[:-1] if text.endswith(".") else sentences


def mock_word_tokenize(text: str) -> List[str]:
    """Naive word tokenizer: split on single spaces."""
    return text.split(" ")


def mock_pos_tag(text: str) -> List[Tuple[str, str]]:
    """Tag 'ask' (case-insensitively) as a verb; leave other tokens untagged."""
    tokens = mock_word_tokenize(text)
    pos_tags: List[Tuple[str, str]] = []
    for token in tokens:
        if token.lower() == "ask":
            pos_tags.append((token, "VB"))
        else:
            pos_tags.append((token, ""))
    return pos_tags
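These helpers read as lightweight test doubles for nltk's sent_tokenize, word_tokenize, and pos_tag. A minimal usage sketch follows, assuming only the three functions defined above; the tests are illustrative, not taken from the repository.

# Minimal illustrative tests for the mocks above; these are assumptions
# about intended usage, not tests taken from the repository.
def test_mock_sent_tokenize_drops_trailing_empty_string():
    assert mock_sent_tokenize("One. Two.") == ["One", " Two"]
    assert mock_sent_tokenize("No trailing period") == ["No trailing period"]


def test_mock_pos_tag_marks_ask_as_a_verb():
    tags = mock_pos_tag("Ask me a question")
    assert tags[0] == ("Ask", "VB")
    assert all(tag == "" for _, tag in tags[1:])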